From 74a8d89cbec86aa3cddf05a8ecadc65458fb4f14 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sat, 1 Apr 2023 08:18:17 +0800
Subject: [PATCH 0999/1033] wifi: mt76: mt7915: build pass for Linux Kernel 5.4
 fixes

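Linux 5.4 predates several APIs that mt76 now relies on: the page_pool
fragment allocator (PP_FLAG_PAGE_FRAG), napi_build_skb(),
skb_mark_for_recycle(), dev_set_threaded(), the buffer-filling variant
of of_get_mac_address(), and the EHT/320 MHz definitions in
cfg80211/mac80211. Fall back to the older equivalents so the driver
builds:

- dma.c: allocate RX buffers from a per-queue page_frag_cache and map
  them explicitly instead of relying on page_pool, i.e.

        buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
        addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
        ...
        dma_unmap_single(dev->dma_dev, addr, len, DMA_FROM_DEVICE);
        skb_free_frag(buf);

  and drain the cache with __page_frag_cache_drain() on queue cleanup.
  Build RX skbs with build_skb() and drop skb_mark_for_recycle().
- usb.c: switch the scatter-gather and transfer buffers to the same
  page_frag_alloc()/skb_free_frag() scheme (the USB core takes care of
  the DMA mapping).
- mt7915/mmio.c: back WED RX buffers with __dev_alloc_pages() plus
  dma_map_single() instead of page_pool.
- mac80211.c, mt7915/main.c, mt7921/main.c: drop the page_pool ethtool
  statistics helpers.
- eeprom.c: use the old const-pointer of_get_mac_address() signature,
  guarded by CONFIG_OF.
- mt76_connac*: remove mt76_connac_get_phy_mode_ext(),
  mt76_connac_get_eht_phy_cap() and the CMD_CBW_320MHZ entries.
- Add explicit <linux/moduleparam.h> / <linux/module.h> includes and
  disable the dev_set_threaded() call in debugfs.c.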
---
 debugfs.c         |  2 ++
 dma.c             | 74 ++++++++++++++++++++++++-----------------------
 eeprom.c          |  8 ++++-
 mac80211.c        | 57 ------------------------------------
 mcu.c             |  1 +
 mt76.h            | 22 +-------------
 mt7615/mcu.c      |  1 +
 mt76_connac.h     |  2 --
 mt76_connac_mcu.c | 47 +-----------------------------
 mt76_connac_mcu.h |  4 ---
 mt7915/main.c     | 26 +++++++----------
 mt7915/mcu.c      |  1 +
 mt7915/mmio.c     | 55 +++++++++++++++++++++--------------
 mt7921/main.c     | 31 +++-----------------
 usb.c             | 43 +++++++++++++--------------
 15 files changed, 122 insertions(+), 252 deletions(-)

diff --git a/debugfs.c b/debugfs.c
index 79064a4..4a8e186 100644
--- a/debugfs.c
+++ b/debugfs.c
@@ -33,8 +33,10 @@ mt76_napi_threaded_set(void *data, u64 val)
         if (!mt76_is_mmio(dev))
                 return -EOPNOTSUPP;
 
+#if 0 /* disable in backport 5.15 */
         if (dev->napi_dev.threaded != val)
                 return dev_set_threaded(&dev->napi_dev, val);
+#endif
 
         return 0;
 }
diff --git a/dma.c b/dma.c
index 05d9ab3..c9d2671 100644
--- a/dma.c
+++ b/dma.c
@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
         local_bh_disable();
         while ((t = __mt76_get_rxwi(dev)) != NULL) {
                 if (t->ptr)
-                        mt76_put_page_pool_buf(t->ptr, false);
+                        skb_free_frag(t->ptr);
                 kfree(t);
         }
         local_bh_enable();
@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
         if (!t)
                 return NULL;
 
-        dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
-                                SKB_WITH_OVERHEAD(q->buf_size),
-                                page_pool_get_dma_dir(q->page_pool));
+        dma_unmap_single(dev->dma_dev, t->dma_addr,
+                         SKB_WITH_OVERHEAD(q->buf_size),
+                         DMA_FROM_DEVICE);
 
         buf = t->ptr;
         t->dma_addr = 0;
@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
         } else {
                 buf = e->buf;
                 e->buf = NULL;
-                dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-                                        SKB_WITH_OVERHEAD(q->buf_size),
-                                        page_pool_get_dma_dir(q->page_pool));
+                dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+                                 SKB_WITH_OVERHEAD(q->buf_size),
+                                 DMA_FROM_DEVICE);
         }
 
         return buf;
@@ -592,11 +592,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-                 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
         int len = SKB_WITH_OVERHEAD(q->buf_size);
-        int frames = 0;
+        int frames = 0, offset = q->buf_offset;
+        dma_addr_t addr;
 
         if (!q->ndesc)
                 return 0;
@@ -604,25 +604,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         spin_lock_bh(&q->lock);
 
         while (q->queued < q->ndesc - 1) {
-                enum dma_data_direction dir;
                 struct mt76_queue_buf qbuf;
-                dma_addr_t addr;
-                int offset;
-                void *buf;
+                void *buf = NULL;
 
-                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                 if (!buf)
                         break;
 
-                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-                dir = page_pool_get_dma_dir(q->page_pool);
-                dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+                addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+                if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+                        skb_free_frag(buf);
+                        break;
+                }
 
-                qbuf.addr = addr + q->buf_offset;
-                qbuf.len = len - q->buf_offset;
+                qbuf.addr = addr + offset;
+                qbuf.len = len - offset;
                 qbuf.skip_unmap = false;
                 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-                        mt76_put_page_pool_buf(buf, allow_direct);
+                        dma_unmap_single(dev->dma_dev, addr, len,
+                                         DMA_FROM_DEVICE);
+                        skb_free_frag(buf);
                         break;
                 }
                 frames++;
@@ -666,7 +667,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
                 /* WED txfree queue needs ring to be initialized before setup */
                 q->flags = 0;
                 mt76_dma_queue_reset(dev, q);
-                mt76_dma_rx_fill(dev, q, false);
+                mt76_dma_rx_fill(dev, q);
                 q->flags = flags;
 
                 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -714,10 +715,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
         if (!q->entry)
                 return -ENOMEM;
 
-        ret = mt76_create_page_pool(dev, q);
-        if (ret)
-                return ret;
-
         ret = mt76_dma_wed_setup(dev, q, false);
         if (ret)
                 return ret;
@@ -731,6 +728,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+        struct page *page;
         void *buf;
         bool more;
 
@@ -744,7 +742,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
                 if (!buf)
                         break;
 
-                mt76_put_page_pool_buf(buf, false);
+                skb_free_frag(buf);
         } while (1);
 
         if (q->rx_head) {
@@ -753,6 +751,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
         }
 
         spin_unlock_bh(&q->lock);
+
+        if (!q->rx_page.va)
+                return;
+
+        page = virt_to_page(q->rx_page.va);
+        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+        memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -773,7 +778,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
         mt76_dma_wed_setup(dev, q, true);
         if (q->flags != MT_WED_Q_TXFREE) {
                 mt76_dma_sync_idx(dev, q);
-                mt76_dma_rx_fill(dev, q, false);
+                mt76_dma_rx_fill(dev, q);
         }
 }
 
@@ -791,7 +796,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
                 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
         } else {
-                mt76_put_page_pool_buf(data, true);
+                skb_free_frag(data);
         }
 
         if (more)
@@ -859,12 +864,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
                     !(dev->drv->rx_check(dev, data, len)))
                         goto free_frag;
 
-                skb = napi_build_skb(data, q->buf_size);
+                skb = build_skb(data, q->buf_size);
                 if (!skb)
                         goto free_frag;
 
                 skb_reserve(skb, q->buf_offset);
-                skb_mark_for_recycle(skb);
 
                 *(u32 *)skb->cb = info;
 
@@ -880,10 +884,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
                 continue;
 
 free_frag:
-                mt76_put_page_pool_buf(data, true);
+                skb_free_frag(data);
         }
 
-        mt76_dma_rx_fill(dev, q, true);
+        mt76_dma_rx_fill(dev, q);
         return done;
 }
 
@@ -928,7 +932,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
         mt76_for_each_q_rx(dev, i) {
                 netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-                mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+                mt76_dma_rx_fill(dev, &dev->q_rx[i]);
                 napi_enable(&dev->napi[i]);
         }
 
@@ -979,8 +983,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
                 netif_napi_del(&dev->napi[i]);
                 mt76_dma_rx_cleanup(dev, q);
-
-                page_pool_destroy(q->page_pool);
         }
 
         mt76_free_pending_txwi(dev);
diff --git a/eeprom.c b/eeprom.c
index ea54b7a..90d36c8 100644
--- a/eeprom.c
+++ b/eeprom.c
@@ -106,9 +106,15 @@ void
 mt76_eeprom_override(struct mt76_phy *phy)
 {
         struct mt76_dev *dev = phy->dev;
+#ifdef CONFIG_OF
         struct device_node *np = dev->dev->of_node;
+        const u8 *mac = NULL;
 
-        of_get_mac_address(np, phy->macaddr);
+        if (np)
+                mac = of_get_mac_address(np);
+        if (!IS_ERR_OR_NULL(mac))
+                ether_addr_copy(phy->macaddr, mac);
+#endif
 
         if (!is_valid_ether_addr(phy->macaddr)) {
                 eth_random_addr(phy->macaddr);
diff --git a/mac80211.c b/mac80211.c
index 2c4a529..991d91b 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) { \
@@ -562,47 +561,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-        struct page_pool_params pp_params = {
-                .order = 0,
-                .flags = PP_FLAG_PAGE_FRAG,
-                .nid = NUMA_NO_NODE,
-                .dev = dev->dma_dev,
-        };
-        int idx = q - dev->q_rx;
-
-        switch (idx) {
-        case MT_RXQ_MAIN:
-        case MT_RXQ_BAND1:
-        case MT_RXQ_BAND2:
-                pp_params.pool_size = 256;
-                break;
-        default:
-                pp_params.pool_size = 16;
-                break;
-        }
-
-        if (mt76_is_mmio(dev)) {
-                /* rely on page_pool for DMA mapping */
-                pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-                pp_params.dma_dir = DMA_FROM_DEVICE;
-                pp_params.max_len = PAGE_SIZE;
-                pp_params.offset = 0;
-        }
-
-        q->page_pool = page_pool_create(&pp_params);
-        if (IS_ERR(q->page_pool)) {
-                int err = PTR_ERR(q->page_pool);
-
-                q->page_pool = NULL;
-                return err;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
                   const struct ieee80211_ops *ops,
@@ -1742,21 +1700,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-        struct page_pool_stats stats = {};
-        int i;
-
-        mt76_for_each_q_rx(dev, i)
-                page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-        page_pool_ethtool_stats_get(data, &stats);
-        *index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
         struct ieee80211_hw *hw = phy->hw;
diff --git a/mcu.c b/mcu.c
index a8cafa3..fa4b054 100644
--- a/mcu.c
+++ b/mcu.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include <linux/moduleparam.h>
 
 struct sk_buff *
 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
diff --git a/mt76.h b/mt76.h
index 3f13cec..a8f26a8 100644
--- a/mt76.h
+++ b/mt76.h
@@ -202,7 +202,7 @@ struct mt76_queue {
 
         dma_addr_t desc_dma;
         struct sk_buff *rx_head;
-        struct page_pool *page_pool;
+        struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1324,7 +1324,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
         return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
                          struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1436,25 +1435,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
                           struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-        struct page *page = virt_to_head_page(buf);
-
-        page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-        struct page *page;
-
-        page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-        if (!page)
-                return NULL;
-
-        return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7615/mcu.c b/mt7615/mcu.c
index 8d745c9..86061e9 100644
--- a/mt7615/mcu.c
+++ b/mt7615/mcu.c
@@ -10,6 +10,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 static bool prefer_offload_fw = true;
 module_param(prefer_offload_fw, bool, 0644);
diff --git a/mt76_connac.h b/mt76_connac.h
index 77ca8f0..ca26984 100644
--- a/mt76_connac.h
+++ b/mt76_connac.h
@@ -56,7 +56,6 @@ enum {
         CMD_CBW_10MHZ,
         CMD_CBW_5MHZ,
         CMD_CBW_8080MHZ,
-        CMD_CBW_320MHZ,
 
         CMD_HE_MCS_BW80 = 0,
         CMD_HE_MCS_BW160,
@@ -264,7 +263,6 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
                 [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
                 [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
                 [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
-                [NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
         };
 
         if (chandef->width >= ARRAY_SIZE(width_to_bw))
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index 46f69aa..732a4e6 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -4,6 +4,7 @@
 #include <linux/firmware.h>
 #include "mt76_connac2_mac.h"
 #include "mt76_connac_mcu.h"
+#include <linux/module.h>
 
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
 {
@@ -1329,40 +1330,6 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
 
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                                enum nl80211_band band)
-{
-        const struct ieee80211_sta_eht_cap *eht_cap;
-        struct ieee80211_supported_band *sband;
-        u8 mode = 0;
-
-        if (band == NL80211_BAND_6GHZ)
-                mode |= PHY_MODE_AX_6G;
-
-        sband = phy->hw->wiphy->bands[band];
-        eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
-
-        if (!eht_cap || !eht_cap->has_eht)
-                return mode;
-
-        switch (band) {
-        case NL80211_BAND_6GHZ:
-                mode |= PHY_MODE_BE_6G;
-                break;
-        case NL80211_BAND_5GHZ:
-                mode |= PHY_MODE_BE_5G;
-                break;
-        case NL80211_BAND_2GHZ:
-                mode |= PHY_MODE_BE_24G;
-                break;
-        default:
-                break;
-        }
-
-        return mode;
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
-
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 {
@@ -1375,18 +1342,6 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
 
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
-{
-        enum nl80211_band band = phy->chandef.chan->band;
-        struct ieee80211_supported_band *sband;
-
-        sband = phy->hw->wiphy->bands[band];
-
-        return ieee80211_get_eht_iftype_cap(sband, vif->type);
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
-
 #define DEFAULT_HE_PE_DURATION 4
 #define DEFAULT_HE_DURATION_RTS_THRES 1023
 static void
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index 91d98ef..ebb7f58 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -1883,12 +1883,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
 
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
 u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
                             enum nl80211_band band, struct ieee80211_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                                enum nl80211_band band);
 
 int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
                             struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/mt7915/main.c b/mt7915/main.c
index 1a741b8..f78f2bf 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1298,22 +1298,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            u32 sset, u8 *data)
 {
-        if (sset != ETH_SS_STATS)
-                return;
-
-        memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-        data += sizeof(mt7915_gstrings_stats);
-        page_pool_ethtool_stats_get_strings(data);
+        if (sset == ETH_SS_STATS)
+                memcpy(data, *mt7915_gstrings_stats,
+                       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, int sset)
 {
-        if (sset != ETH_SS_STATS)
-                return 0;
+        if (sset == ETH_SS_STATS)
+                return MT7915_SSTATS_LEN;
 
-        return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+        return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1341,7 +1338,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
         };
         struct mib_stats *mib = &phy->mib;
         /* See mt7915_ampdu_stat_read_phy, etc */
-        int i, ei = 0, stats_size;
+        int i, ei = 0;
 
         mutex_lock(&dev->mt76.mutex);
 
@@ -1422,12 +1419,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
                 return;
 
         ei += wi.worker_stat_count;
-
-        mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-        stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-        if (ei != stats_size)
-                dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+        if (ei != MT7915_SSTATS_LEN)
+                dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+                        ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index 9dd4e34..dbdc48a 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -6,6 +6,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 #define fw_name(_dev, name, ...) ({ \
         char *_fw; \
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 984b5f6..1bb8a4c 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
         struct mt7915_dev *dev;
+        u32 length;
         int i;
 
         dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+        length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+                                sizeof(struct skb_shared_info));
+
         for (i = 0; i < dev->mt76.rx_token_size; i++) {
                 struct mt76_txwi_cache *t;
 
@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
                 if (!t || !t->ptr)
                         continue;
 
-                mt76_put_page_pool_buf(t->ptr, false);
+                dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+                                 wed->wlan.rx_size, DMA_FROM_DEVICE);
+                __free_pages(virt_to_page(t->ptr), get_order(length));
                 t->ptr = NULL;
 
                 mt76_put_rxwi(&dev->mt76, t);
@@ -618,38 +624,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
         struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-        struct mt76_txwi_cache *t = NULL;
         struct mt7915_dev *dev;
-        struct mt76_queue *q;
-        int i, len;
+        u32 length;
+        int i;
 
         dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-        q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-        len = SKB_WITH_OVERHEAD(q->buf_size);
+        length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+                                sizeof(struct skb_shared_info));
 
         for (i = 0; i < size; i++) {
-                enum dma_data_direction dir;
-                dma_addr_t addr;
-                u32 offset;
+                struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+                dma_addr_t phy_addr;
+                struct page *page;
                 int token;
-                void *buf;
+                void *ptr;
 
-                t = mt76_get_rxwi(&dev->mt76);
                 if (!t)
                         goto unmap;
 
-                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-                if (!buf)
+                page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+                if (!page) {
+                        mt76_put_rxwi(&dev->mt76, t);
                         goto unmap;
+                }
 
-                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-                dir = page_pool_get_dma_dir(q->page_pool);
-                dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+                ptr = page_address(page);
+                phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+                                          wed->wlan.rx_size,
+                                          DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+                        __free_pages(page, get_order(length));
+                        mt76_put_rxwi(&dev->mt76, t);
+                        goto unmap;
+                }
 
-                desc->buf0 = cpu_to_le32(addr);
-                token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+                desc->buf0 = cpu_to_le32(phy_addr);
+                token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
                 if (token < 0) {
-                        mt76_put_page_pool_buf(buf, false);
+                        dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+                                         wed->wlan.rx_size, DMA_TO_DEVICE);
+                        __free_pages(page, get_order(length));
+                        mt76_put_rxwi(&dev->mt76, t);
                         goto unmap;
                 }
 
@@ -661,8 +676,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
         return 0;
 
 unmap:
-        if (t)
-                mt76_put_rxwi(&dev->mt76, t);
         mt7915_mmio_wed_release_rx_buf(wed);
         return -ENOMEM;
 }
diff --git a/mt7921/main.c b/mt7921/main.c
index 3b6adb2..47eb38e 100644
--- a/mt7921/main.c
+++ b/mt7921/main.c
@@ -1083,34 +1083,17 @@ static void
 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       u32 sset, u8 *data)
 {
-        struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
         if (sset != ETH_SS_STATS)
                 return;
 
         memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
-
-        if (mt76_is_sdio(&dev->mt76))
-                return;
-
-        data += sizeof(mt7921_gstrings_stats);
-        page_pool_ethtool_stats_get_strings(data);
 }
 
 static int
 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                          int sset)
 {
-        struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
-        if (sset != ETH_SS_STATS)
-                return 0;
-
-        if (mt76_is_sdio(&dev->mt76))
-                return ARRAY_SIZE(mt7921_gstrings_stats);
-
-        return ARRAY_SIZE(mt7921_gstrings_stats) +
-               page_pool_ethtool_stats_get_count();
+        return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
 }
 
 static void
@@ -1130,7 +1113,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                          struct ethtool_stats *stats, u64 *data)
 {
         struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-        int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
         struct mt7921_phy *phy = mt7921_hw_phy(hw);
         struct mt7921_dev *dev = phy->dev;
         struct mib_stats *mib = &phy->mib;
@@ -1186,14 +1168,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                 return;
 
         ei += wi.worker_stat_count;
-
-        if (!mt76_is_sdio(&dev->mt76)) {
-                mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-                stats_size += page_pool_ethtool_stats_get_count();
-        }
-
-        if (ei != stats_size)
-                dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
+        if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+                dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+                        ei, ARRAY_SIZE(mt7921_gstrings_stats));
 }
 
 static u64
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-                 int nsgs)
+                 int nsgs, gfp_t gfp)
 {
         int i;
 
         for (i = 0; i < nsgs; i++) {
+                struct page *page;
                 void *data;
                 int offset;
 
-                data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+                data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                 if (!data)
                         break;
 
-                sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-                            offset);
+                page = virt_to_head_page(data);
+                offset = data - page_address(page);
+                sg_set_page(&urb->sg[i], page, q->buf_size, offset);
         }
 
         if (i < nsgs) {
                 int j;
 
                 for (j = nsgs; j < urb->num_sgs; j++)
-                        mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+                        skb_free_frag(sg_virt(&urb->sg[j]));
                 urb->num_sgs = i;
         }
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-                struct urb *urb, int nsgs)
+                struct urb *urb, int nsgs, gfp_t gfp)
 {
         enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-        int offset;
 
         if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-                return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
         urb->transfer_buffer_length = q->buf_size;
-        urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+        urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
         return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
         if (err)
                 return err;
 
-        return mt76u_refill_rx(dev, q, e->urb, sg_size);
+        return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
         int i;
 
         for (i = 0; i < urb->num_sgs; i++)
-                mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+                skb_free_frag(sg_virt(&urb->sg[i]));
 
         if (urb->transfer_buffer)
-                mt76_put_page_pool_buf(urb->transfer_buffer, false);
+                skb_free_frag(urb->transfer_buffer);
 
         usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
                 len -= data_len;
                 nsgs++;
         }
-
-        skb_mark_for_recycle(skb);
         dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
         return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
                 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
                 if (count > 0) {
-                        err = mt76u_refill_rx(dev, q, urb, count);
+                        err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
                         if (err < 0)
                                 break;
                 }
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
         struct mt76_queue *q = &dev->q_rx[qid];
         int i, err;
 
-        err = mt76_create_page_pool(dev, q);
-        if (err)
-                return err;
-
         spin_lock_init(&q->lock);
         q->entry = devm_kcalloc(dev->dev,
                                 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+        struct page *page;
         int i;
 
         for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
                 mt76u_urb_free(q->entry[i].urb);
                 q->entry[i].urb = NULL;
         }
-        page_pool_destroy(q->page_pool);
-        q->page_pool = NULL;
+
+        if (!q->rx_page.va)
+                return;
+
+        page = virt_to_page(q->rx_page.va);
+        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+        memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0
