From ed9230087e1ef8314f3364b6a3db65165abf8934 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sat, 1 Apr 2023 08:18:17 +0800
Subject: [PATCH] wifi: mt76: mt7915: build pass for Linux Kernel 5.4 fixes

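Adapt the driver to the 5.4 kernel APIs:

- Replace page_pool based RX buffer recycling with a per-queue
  page_frag_cache plus explicit dma_map_single()/dma_unmap_single()
  calls, and drop the page_pool ethtool counters (the page_pool
  fragment API is not available on 5.4).
- Use the pointer-returning of_get_mac_address() variant and guard
  it with CONFIG_OF.
- Stub out the NAPI threaded toggle in debugfs (dev_set_threaded()
  is not available in this backport).
- Drop the 320 MHz and EHT helpers: 5.4 cfg80211 defines neither
  NL80211_CHAN_WIDTH_320 nor ieee80211_get_eht_iftype_cap().
- Use build_skb() instead of napi_build_skb() and drop
  skb_mark_for_recycle().
- Include <linux/moduleparam.h>/<linux/module.h> explicitly where
  module_param() is used.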
---
 debugfs.c         |  2 ++
 dma.c             | 74 ++++++++++++++++++++++++-----------------------
 eeprom.c          |  8 ++++-
 mac80211.c        | 57 ------------------------------------
 mcu.c             |  1 +
 mt76.h            | 22 +-------------
 mt7615/mcu.c      |  1 +
 mt76_connac.h     |  2 --
 mt76_connac_mcu.c | 47 +-----------------------------
 mt76_connac_mcu.h |  4 ---
 mt7915/main.c     | 26 +++++++----------
 mt7915/mcu.c      |  1 +
 mt7915/mmio.c     | 55 +++++++++++++++++++++--------------
 mt7921/main.c     | 31 +++-----------------
 usb.c             | 43 +++++++++++++--------------
 15 files changed, 122 insertions(+), 252 deletions(-)

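Note (kept out of the applied hunks; git am ignores this region): the
diff below repeats one buffer-management pattern. A minimal sketch with
the dma.c names, assuming the q/dev variables of mt76_dma_rx_fill() and
mt76_dma_rx_cleanup():

    void *buf;
    dma_addr_t addr;

    /* fill path: carve a frag from the queue cache and map it */
    buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
    if (!buf)
        return -ENOMEM;
    addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
        skb_free_frag(buf); /* return the frag to its page */
        return -ENOMEM;
    }

    /* cleanup path: unmap, free, then drain the whole cache */
    dma_unmap_single(dev->dma_dev, addr, len, DMA_FROM_DEVICE);
    skb_free_frag(buf);
    if (q->rx_page.va) {
        struct page *page = virt_to_page(q->rx_page.va);

        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
    }
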
diff --git a/debugfs.c b/debugfs.c
index 79064a4..4a8e186 100644
--- a/debugfs.c
+++ b/debugfs.c
@@ -33,8 +33,10 @@ mt76_napi_threaded_set(void *data, u64 val)
         if (!mt76_is_mmio(dev))
                 return -EOPNOTSUPP;
 
+#if 0 /* disable in backport 5.15 */
         if (dev->napi_dev.threaded != val)
                 return dev_set_threaded(&dev->napi_dev, val);
+#endif
 
         return 0;
 }
diff --git a/dma.c b/dma.c
index c22ea64..e1b73a1 100644
--- a/dma.c
+++ b/dma.c
@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
         local_bh_disable();
         while ((t = __mt76_get_rxwi(dev)) != NULL) {
                 if (t->ptr)
-                        mt76_put_page_pool_buf(t->ptr, false);
+                        skb_free_frag(t->ptr);
                 kfree(t);
         }
         local_bh_enable();
@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
         if (!t)
                 return NULL;
 
-        dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
-                                SKB_WITH_OVERHEAD(q->buf_size),
-                                page_pool_get_dma_dir(q->page_pool));
+        dma_unmap_single(dev->dma_dev, t->dma_addr,
+                         SKB_WITH_OVERHEAD(q->buf_size),
+                         DMA_FROM_DEVICE);
 
         buf = t->ptr;
         t->dma_addr = 0;
@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
         } else {
                 buf = e->buf;
                 e->buf = NULL;
-                dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-                                        SKB_WITH_OVERHEAD(q->buf_size),
-                                        page_pool_get_dma_dir(q->page_pool));
+                dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+                                 SKB_WITH_OVERHEAD(q->buf_size),
+                                 DMA_FROM_DEVICE);
         }
 
         return buf;
@@ -590,11 +590,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-                 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
         int len = SKB_WITH_OVERHEAD(q->buf_size);
-        int frames = 0;
+        int frames = 0, offset = q->buf_offset;
+        dma_addr_t addr;
 
         if (!q->ndesc)
                 return 0;
@@ -602,25 +602,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         spin_lock_bh(&q->lock);
 
         while (q->queued < q->ndesc - 1) {
-                enum dma_data_direction dir;
                 struct mt76_queue_buf qbuf;
-                dma_addr_t addr;
-                int offset;
-                void *buf;
+                void *buf = NULL;
 
-                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                 if (!buf)
                         break;
 
-                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-                dir = page_pool_get_dma_dir(q->page_pool);
-                dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+                addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+                if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+                        skb_free_frag(buf);
+                        break;
+                }
 
-                qbuf.addr = addr + q->buf_offset;
-                qbuf.len = len - q->buf_offset;
+                qbuf.addr = addr + offset;
+                qbuf.len = len - offset;
                 qbuf.skip_unmap = false;
                 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-                        mt76_put_page_pool_buf(buf, allow_direct);
+                        dma_unmap_single(dev->dma_dev, addr, len,
+                                         DMA_FROM_DEVICE);
+                        skb_free_frag(buf);
                         break;
                 }
                 frames++;
@@ -664,7 +665,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
                 /* WED txfree queue needs ring to be initialized before setup */
                 q->flags = 0;
                 mt76_dma_queue_reset(dev, q);
-                mt76_dma_rx_fill(dev, q, false);
+                mt76_dma_rx_fill(dev, q);
                 q->flags = flags;
 
                 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -712,10 +713,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
         if (!q->entry)
                 return -ENOMEM;
 
-        ret = mt76_create_page_pool(dev, q);
-        if (ret)
-                return ret;
-
         ret = mt76_dma_wed_setup(dev, q, false);
         if (ret)
                 return ret;
@@ -729,6 +726,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+        struct page *page;
         void *buf;
         bool more;
 
@@ -742,7 +740,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
                 if (!buf)
                         break;
 
-                mt76_put_page_pool_buf(buf, false);
+                skb_free_frag(buf);
         } while (1);
 
         if (q->rx_head) {
@@ -751,6 +749,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
         }
 
         spin_unlock_bh(&q->lock);
+
+        if (!q->rx_page.va)
+                return;
+
+        page = virt_to_page(q->rx_page.va);
+        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+        memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -771,7 +776,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
         mt76_dma_wed_setup(dev, q, true);
         if (q->flags != MT_WED_Q_TXFREE) {
                 mt76_dma_sync_idx(dev, q);
-                mt76_dma_rx_fill(dev, q, false);
+                mt76_dma_rx_fill(dev, q);
         }
 }
 
@@ -789,7 +794,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
                 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
         } else {
-                mt76_put_page_pool_buf(data, true);
+                skb_free_frag(data);
         }
 
         if (more)
@@ -857,12 +862,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
                     !(dev->drv->rx_check(dev, data, len)))
                         goto free_frag;
 
-                skb = napi_build_skb(data, q->buf_size);
+                skb = build_skb(data, q->buf_size);
                 if (!skb)
                         goto free_frag;
 
                 skb_reserve(skb, q->buf_offset);
-                skb_mark_for_recycle(skb);
 
                 *(u32 *)skb->cb = info;
 
@@ -878,10 +882,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
                         continue;
 
 free_frag:
-                mt76_put_page_pool_buf(data, true);
+                skb_free_frag(data);
         }
 
-        mt76_dma_rx_fill(dev, q, true);
+        mt76_dma_rx_fill(dev, q);
         return done;
 }
 
@@ -926,7 +930,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
         mt76_for_each_q_rx(dev, i) {
                 netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-                mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+                mt76_dma_rx_fill(dev, &dev->q_rx[i]);
                 napi_enable(&dev->napi[i]);
         }
 
@@ -977,8 +981,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
                 netif_napi_del(&dev->napi[i]);
                 mt76_dma_rx_cleanup(dev, q);
-
-                page_pool_destroy(q->page_pool);
         }
 
         mt76_free_pending_txwi(dev);
diff --git a/eeprom.c b/eeprom.c
index ea54b7a..90d36c8 100644
--- a/eeprom.c
+++ b/eeprom.c
@@ -106,9 +106,15 @@ void
 mt76_eeprom_override(struct mt76_phy *phy)
 {
         struct mt76_dev *dev = phy->dev;
+#ifdef CONFIG_OF
         struct device_node *np = dev->dev->of_node;
+        const u8 *mac = NULL;
 
-        of_get_mac_address(np, phy->macaddr);
+        if (np)
+                mac = of_get_mac_address(np);
+        if (!IS_ERR_OR_NULL(mac))
+                ether_addr_copy(phy->macaddr, mac);
+#endif
 
         if (!is_valid_ether_addr(phy->macaddr)) {
                 eth_random_addr(phy->macaddr);
diff --git a/mac80211.c b/mac80211.c
index 87902f4..577d81a 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) { \
@@ -562,47 +561,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-        struct page_pool_params pp_params = {
-                .order = 0,
-                .flags = PP_FLAG_PAGE_FRAG,
-                .nid = NUMA_NO_NODE,
-                .dev = dev->dma_dev,
-        };
-        int idx = q - dev->q_rx;
-
-        switch (idx) {
-        case MT_RXQ_MAIN:
-        case MT_RXQ_BAND1:
-        case MT_RXQ_BAND2:
-                pp_params.pool_size = 256;
-                break;
-        default:
-                pp_params.pool_size = 16;
-                break;
-        }
-
-        if (mt76_is_mmio(dev)) {
-                /* rely on page_pool for DMA mapping */
-                pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-                pp_params.dma_dir = DMA_FROM_DEVICE;
-                pp_params.max_len = PAGE_SIZE;
-                pp_params.offset = 0;
-        }
-
-        q->page_pool = page_pool_create(&pp_params);
-        if (IS_ERR(q->page_pool)) {
-                int err = PTR_ERR(q->page_pool);
-
-                q->page_pool = NULL;
-                return err;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
                   const struct ieee80211_ops *ops,
@@ -1741,21 +1699,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-        struct page_pool_stats stats = {};
-        int i;
-
-        mt76_for_each_q_rx(dev, i)
-                page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-        page_pool_ethtool_stats_get(data, &stats);
-        *index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
         struct ieee80211_hw *hw = phy->hw;
diff --git a/mcu.c b/mcu.c
index a8cafa3..fa4b054 100644
--- a/mcu.c
+++ b/mcu.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include <linux/moduleparam.h>
 
 struct sk_buff *
 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
diff --git a/mt76.h b/mt76.h
index 11d4936..ffa9595 100644
--- a/mt76.h
+++ b/mt76.h
@@ -202,7 +202,7 @@ struct mt76_queue {
 
         dma_addr_t desc_dma;
         struct sk_buff *rx_head;
-        struct page_pool *page_pool;
+        struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1323,7 +1323,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
         return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
                          struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1435,25 +1434,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
                           struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-        struct page *page = virt_to_head_page(buf);
-
-        page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-        struct page *page;
-
-        page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-        if (!page)
-                return NULL;
-
-        return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7615/mcu.c b/mt7615/mcu.c
index eea398c..4593b2e 100644
--- a/mt7615/mcu.c
+++ b/mt7615/mcu.c
@@ -10,6 +10,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 static bool prefer_offload_fw = true;
 module_param(prefer_offload_fw, bool, 0644);
diff --git a/mt76_connac.h b/mt76_connac.h
index b339c50..2ee9a3c 100644
--- a/mt76_connac.h
+++ b/mt76_connac.h
@@ -42,7 +42,6 @@ enum {
         CMD_CBW_10MHZ,
         CMD_CBW_5MHZ,
         CMD_CBW_8080MHZ,
-        CMD_CBW_320MHZ,
 
         CMD_HE_MCS_BW80 = 0,
         CMD_HE_MCS_BW160,
@@ -240,7 +239,6 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
                 [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
                 [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
                 [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
-                [NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
         };
 
         if (chandef->width >= ARRAY_SIZE(width_to_bw))
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index efb9bfa..b0dcc5a 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -4,6 +4,7 @@
 #include <linux/firmware.h>
 #include "mt76_connac2_mac.h"
 #include "mt76_connac_mcu.h"
+#include <linux/module.h>
 
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
 {
@@ -1329,40 +1330,6 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
 
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                                enum nl80211_band band)
-{
-        const struct ieee80211_sta_eht_cap *eht_cap;
-        struct ieee80211_supported_band *sband;
-        u8 mode = 0;
-
-        if (band == NL80211_BAND_6GHZ)
-                mode |= PHY_MODE_AX_6G;
-
-        sband = phy->hw->wiphy->bands[band];
-        eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
-
-        if (!eht_cap || !eht_cap->has_eht)
-                return mode;
-
-        switch (band) {
-        case NL80211_BAND_6GHZ:
-                mode |= PHY_MODE_BE_6G;
-                break;
-        case NL80211_BAND_5GHZ:
-                mode |= PHY_MODE_BE_5G;
-                break;
-        case NL80211_BAND_2GHZ:
-                mode |= PHY_MODE_BE_24G;
-                break;
-        default:
-                break;
-        }
-
-        return mode;
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
-
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 {
@@ -1375,18 +1342,6 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
 
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
-{
-        enum nl80211_band band = phy->chandef.chan->band;
-        struct ieee80211_supported_band *sband;
-
-        sband = phy->hw->wiphy->bands[band];
-
-        return ieee80211_get_eht_iftype_cap(sband, vif->type);
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
-
 #define DEFAULT_HE_PE_DURATION 4
 #define DEFAULT_HE_DURATION_RTS_THRES 1023
 static void
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index 40a99e0..d5fb7a6 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -1871,12 +1871,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
 
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
 u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
                             enum nl80211_band band, struct ieee80211_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                                enum nl80211_band band);
 
 int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
                             struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/mt7915/main.c b/mt7915/main.c
index 870b7b2..546a0c9 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1300,22 +1300,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            u32 sset, u8 *data)
 {
-        if (sset != ETH_SS_STATS)
-                return;
-
-        memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-        data += sizeof(mt7915_gstrings_stats);
-        page_pool_ethtool_stats_get_strings(data);
+        if (sset == ETH_SS_STATS)
+                memcpy(data, *mt7915_gstrings_stats,
+                       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, int sset)
 {
-        if (sset != ETH_SS_STATS)
-                return 0;
+        if (sset == ETH_SS_STATS)
+                return MT7915_SSTATS_LEN;
 
-        return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+        return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1343,7 +1340,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
         };
         struct mib_stats *mib = &phy->mib;
         /* See mt7915_ampdu_stat_read_phy, etc */
-        int i, ei = 0, stats_size;
+        int i, ei = 0;
 
         mutex_lock(&dev->mt76.mutex);
 
@@ -1424,12 +1421,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
                 return;
 
         ei += wi.worker_stat_count;
-
-        mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-        stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-        if (ei != stats_size)
-                dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+        if (ei != MT7915_SSTATS_LEN)
+                dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+                        ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index a9f8a88..173dc35 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -6,6 +6,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 #define fw_name(_dev, name, ...) ({ \
         char *_fw; \
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 6f0c0e2..5ef43c4 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
         struct mt7915_dev *dev;
+        u32 length;
         int i;
 
         dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+        length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+                                sizeof(struct skb_shared_info));
+
         for (i = 0; i < dev->mt76.rx_token_size; i++) {
                 struct mt76_txwi_cache *t;
 
@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
                 if (!t || !t->ptr)
                         continue;
 
-                mt76_put_page_pool_buf(t->ptr, false);
+                dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+                                 wed->wlan.rx_size, DMA_FROM_DEVICE);
+                __free_pages(virt_to_page(t->ptr), get_order(length));
                 t->ptr = NULL;
 
                 mt76_put_rxwi(&dev->mt76, t);
@@ -618,38 +624,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
         struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-        struct mt76_txwi_cache *t = NULL;
         struct mt7915_dev *dev;
-        struct mt76_queue *q;
-        int i, len;
+        u32 length;
+        int i;
 
         dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-        q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-        len = SKB_WITH_OVERHEAD(q->buf_size);
+        length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+                                sizeof(struct skb_shared_info));
 
         for (i = 0; i < size; i++) {
-                enum dma_data_direction dir;
-                dma_addr_t addr;
-                u32 offset;
+                struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+                dma_addr_t phy_addr;
+                struct page *page;
                 int token;
-                void *buf;
+                void *ptr;
 
-                t = mt76_get_rxwi(&dev->mt76);
                 if (!t)
                         goto unmap;
 
-                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-                if (!buf)
+                page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+                if (!page) {
+                        mt76_put_rxwi(&dev->mt76, t);
                         goto unmap;
+                }
 
-                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-                dir = page_pool_get_dma_dir(q->page_pool);
-                dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+                ptr = page_address(page);
+                phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+                                          wed->wlan.rx_size,
+                                          DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+                        __free_pages(page, get_order(length));
+                        mt76_put_rxwi(&dev->mt76, t);
+                        goto unmap;
+                }
 
-                desc->buf0 = cpu_to_le32(addr);
-                token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+                desc->buf0 = cpu_to_le32(phy_addr);
+                token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
                 if (token < 0) {
-                        mt76_put_page_pool_buf(buf, false);
+                        dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+                                         wed->wlan.rx_size, DMA_TO_DEVICE);
+                        __free_pages(page, get_order(length));
+                        mt76_put_rxwi(&dev->mt76, t);
                         goto unmap;
                 }
 
@@ -661,8 +676,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
         return 0;
 
 unmap:
-        if (t)
-                mt76_put_rxwi(&dev->mt76, t);
         mt7915_mmio_wed_release_rx_buf(wed);
         return -ENOMEM;
 }
diff --git a/mt7921/main.c b/mt7921/main.c
index a72964e..4c40022 100644
--- a/mt7921/main.c
+++ b/mt7921/main.c
@@ -1090,34 +1090,17 @@ static void
 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       u32 sset, u8 *data)
 {
-        struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
         if (sset != ETH_SS_STATS)
                 return;
 
         memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
-
-        if (mt76_is_sdio(&dev->mt76))
-                return;
-
-        data += sizeof(mt7921_gstrings_stats);
-        page_pool_ethtool_stats_get_strings(data);
 }
 
 static int
 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                          int sset)
 {
-        struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
-        if (sset != ETH_SS_STATS)
-                return 0;
-
-        if (mt76_is_sdio(&dev->mt76))
-                return ARRAY_SIZE(mt7921_gstrings_stats);
-
-        return ARRAY_SIZE(mt7921_gstrings_stats) +
-               page_pool_ethtool_stats_get_count();
+        return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
 }
 
 static void
@@ -1137,7 +1120,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                          struct ethtool_stats *stats, u64 *data)
 {
         struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-        int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
         struct mt7921_phy *phy = mt7921_hw_phy(hw);
         struct mt7921_dev *dev = phy->dev;
         struct mib_stats *mib = &phy->mib;
@@ -1193,14 +1175,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                 return;
 
         ei += wi.worker_stat_count;
-
-        if (!mt76_is_sdio(&dev->mt76)) {
-                mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-                stats_size += page_pool_ethtool_stats_get_count();
-        }
-
-        if (ei != stats_size)
-                dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
+        if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+                dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+                        ei, ARRAY_SIZE(mt7921_gstrings_stats));
 }
 
 static u64
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-                 int nsgs)
+                 int nsgs, gfp_t gfp)
 {
         int i;
 
         for (i = 0; i < nsgs; i++) {
+                struct page *page;
                 void *data;
                 int offset;
 
-                data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+                data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                 if (!data)
                         break;
 
-                sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-                            offset);
+                page = virt_to_head_page(data);
+                offset = data - page_address(page);
+                sg_set_page(&urb->sg[i], page, q->buf_size, offset);
         }
 
         if (i < nsgs) {
                 int j;
 
                 for (j = nsgs; j < urb->num_sgs; j++)
-                        mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+                        skb_free_frag(sg_virt(&urb->sg[j]));
                 urb->num_sgs = i;
         }
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-                struct urb *urb, int nsgs)
+                struct urb *urb, int nsgs, gfp_t gfp)
 {
         enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-        int offset;
 
         if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-                return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
         urb->transfer_buffer_length = q->buf_size;
-        urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+        urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
         return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
         if (err)
                 return err;
 
-        return mt76u_refill_rx(dev, q, e->urb, sg_size);
+        return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
         int i;
 
         for (i = 0; i < urb->num_sgs; i++)
-                mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+                skb_free_frag(sg_virt(&urb->sg[i]));
 
         if (urb->transfer_buffer)
-                mt76_put_page_pool_buf(urb->transfer_buffer, false);
+                skb_free_frag(urb->transfer_buffer);
 
         usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
                 len -= data_len;
                 nsgs++;
         }
-
-        skb_mark_for_recycle(skb);
         dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
         return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
                 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
                 if (count > 0) {
-                        err = mt76u_refill_rx(dev, q, urb, count);
+                        err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
                         if (err < 0)
                                 break;
                 }
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
         struct mt76_queue *q = &dev->q_rx[qid];
         int i, err;
 
-        err = mt76_create_page_pool(dev, q);
-        if (err)
-                return err;
-
         spin_lock_init(&q->lock);
         q->entry = devm_kcalloc(dev->dev,
                                 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+        struct page *page;
         int i;
 
         for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
                 mt76u_urb_free(q->entry[i].urb);
                 q->entry[i].urb = NULL;
         }
-        page_pool_destroy(q->page_pool);
-        q->page_pool = NULL;
+
+        if (!q->rx_page.va)
+                return;
+
+        page = virt_to_page(q->rx_page.va);
+        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+        memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0