From cb562c01d7c29ea4b15d31153278b94670650ad0 Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sat, 1 Apr 2023 08:18:17 +0800
Subject: [PATCH 0999/1034] wifi: mt76: mt7915: build pass for Linux Kernel 5.4
 fixes

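Rework the shared mt76 RX buffer path from page_pool back to
page_frag_cache, and drop the 802.11be (EHT) connac helpers, so the
driver builds against this older kernel:

- dma.c, usb.c: allocate RX buffers with page_frag_alloc(), free them
  with skb_free_frag(), map/unmap them explicitly with
  dma_map_single()/dma_unmap_single(), and drain the per-queue rx_page
  cache on teardown; use build_skb() instead of napi_build_skb() and
  drop skb_mark_for_recycle().
- mt7915/mmio.c: back the WED RX ring with __dev_alloc_pages() plus
  explicit DMA mapping instead of page_pool fragments.
- mac80211.c, mt76.h: remove mt76_create_page_pool() and the page_pool
  buffer helpers; struct mt76_queue carries a page_frag_cache again.
- mt7915/main.c, mt7921/main.c: drop the page_pool counters from the
  ethtool stats.
- eeprom.c: use the older of_get_mac_address() variant that returns a
  pointer instead of filling a caller buffer.
- debugfs.c: dev_set_threaded() is not usable here, so the NAPI
  threaded toggle is compiled out.
- mcu.c, mt7615/mcu.c, mt7915/mcu.c, mt76_connac_mcu.c: add explicit
  <linux/module.h>/<linux/moduleparam.h> includes.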
---
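Note: every RX hunk below follows the same buffer lifecycle — carve a
fragment from the queue's page_frag_cache, DMA-map it by hand, and give
the page references back on teardown. A minimal sketch of that cycle,
using the same kernel calls as the dma.c hunks; the helper names
rx_buf_get()/rx_cache_drain() are illustrative only, not part of the
patch:

static void *rx_buf_get(struct mt76_dev *dev, struct mt76_queue *q,
			dma_addr_t *addr)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	void *buf;

	/* carve a buf_size fragment out of the per-queue page cache */
	buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
	if (!buf)
		return NULL;

	/* the driver owns the DMA mapping now; page_pool did this before */
	*addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, *addr))) {
		skb_free_frag(buf);	/* drops the fragment's page ref */
		return NULL;
	}

	return buf;
}

static void rx_cache_drain(struct mt76_queue *q)
{
	/* release the cache's outstanding page references on teardown */
	if (!q->rx_page.va)
		return;

	__page_frag_cache_drain(virt_to_page(q->rx_page.va),
				q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}
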
 debugfs.c         |  2 ++
 dma.c             | 74 ++++++++++++++++++++++++-----------------------
 eeprom.c          |  8 ++++-
 mac80211.c        | 57 ------------------------------------
 mcu.c             |  1 +
 mt76.h            | 22 +-------------
 mt7615/mcu.c      |  1 +
 mt76_connac.h     |  2 --
 mt76_connac_mcu.c | 47 +-----------------------------
 mt76_connac_mcu.h |  4 ---
 mt7915/main.c     | 26 +++++++----------
 mt7915/mcu.c      |  1 +
 mt7915/mmio.c     | 55 +++++++++++++++++++++--------------
 mt7921/main.c     | 31 +++-----------------
 usb.c             | 43 +++++++++++++--------------
 15 files changed, 122 insertions(+), 252 deletions(-)

diff --git a/debugfs.c b/debugfs.c
index 79064a4..4a8e186 100644
--- a/debugfs.c
+++ b/debugfs.c
@@ -33,8 +33,10 @@ mt76_napi_threaded_set(void *data, u64 val)
 	if (!mt76_is_mmio(dev))
 		return -EOPNOTSUPP;
 
+#if 0 /* disable in backport 5.15 */
 	if (dev->napi_dev.threaded != val)
 		return dev_set_threaded(&dev->napi_dev, val);
+#endif
 
 	return 0;
 }
diff --git a/dma.c b/dma.c
index 05d9ab3..c9d2671 100644
--- a/dma.c
+++ b/dma.c
@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((t = __mt76_get_rxwi(dev)) != NULL) {
 		if (t->ptr)
-			mt76_put_page_pool_buf(t->ptr, false);
+			skb_free_frag(t->ptr);
 		kfree(t);
 	}
 	local_bh_enable();
@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (!t)
 		return NULL;
 
-	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
-				SKB_WITH_OVERHEAD(q->buf_size),
-				page_pool_get_dma_dir(q->page_pool));
+	dma_unmap_single(dev->dma_dev, t->dma_addr,
+			 SKB_WITH_OVERHEAD(q->buf_size),
+			 DMA_FROM_DEVICE);
 
 	buf = t->ptr;
 	t->dma_addr = 0;
@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
-		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
-					SKB_WITH_OVERHEAD(q->buf_size),
-					page_pool_get_dma_dir(q->page_pool));
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
+				 SKB_WITH_OVERHEAD(q->buf_size),
+				 DMA_FROM_DEVICE);
 	}
 
 	return buf;
@@ -592,11 +592,11 @@ free_skb:
 }
 
 static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
-		 bool allow_direct)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
-	int frames = 0;
+	int frames = 0, offset = q->buf_offset;
+	dma_addr_t addr;
 
 	if (!q->ndesc)
 		return 0;
@@ -604,25 +604,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		enum dma_data_direction dir;
 		struct mt76_queue_buf qbuf;
-		dma_addr_t addr;
-		int offset;
-		void *buf;
+		void *buf = NULL;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
 
-		qbuf.addr = addr + q->buf_offset;
-		qbuf.len = len - q->buf_offset;
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
-			mt76_put_page_pool_buf(buf, allow_direct);
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
 			break;
 		}
 		frames++;
@@ -666,7 +667,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
 		mt76_dma_queue_reset(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 		q->flags = flags;
 
 		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -714,10 +715,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->entry)
 		return -ENOMEM;
 
-	ret = mt76_create_page_pool(dev, q);
-	if (ret)
-		return ret;
-
 	ret = mt76_dma_wed_setup(dev, q, false);
 	if (ret)
 		return ret;
@@ -731,6 +728,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -744,7 +742,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		mt76_put_page_pool_buf(buf, false);
+		skb_free_frag(buf);
 	} while (1);
 
 	if (q->rx_head) {
@@ -753,6 +751,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	}
 
 	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
@@ -773,7 +778,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	mt76_dma_wed_setup(dev, q, true);
 	if (q->flags != MT_WED_Q_TXFREE) {
 		mt76_dma_sync_idx(dev, q);
-		mt76_dma_rx_fill(dev, q, false);
+		mt76_dma_rx_fill(dev, q);
 	}
 }
 
@@ -791,7 +796,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
 	if (more)
@@ -859,12 +864,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		    !(dev->drv->rx_check(dev, data, len)))
 			goto free_frag;
 
-		skb = napi_build_skb(data, q->buf_size);
+		skb = build_skb(data, q->buf_size);
 		if (!skb)
 			goto free_frag;
 
 		skb_reserve(skb, q->buf_offset);
-		skb_mark_for_recycle(skb);
 
 		*(u32 *)skb->cb = info;
 
@@ -880,10 +884,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		skb_free_frag(data);
 	}
 
-	mt76_dma_rx_fill(dev, q, true);
+	mt76_dma_rx_fill(dev, q);
 	return done;
 }
 
@@ -928,7 +932,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
 		napi_enable(&dev->napi[i]);
 	}
 
@@ -979,8 +983,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, q);
-
-		page_pool_destroy(q->page_pool);
 	}
 
 	mt76_free_pending_txwi(dev);
diff --git a/eeprom.c b/eeprom.c
index ea54b7a..90d36c8 100644
--- a/eeprom.c
+++ b/eeprom.c
@@ -106,9 +106,15 @@ void
 mt76_eeprom_override(struct mt76_phy *phy)
 {
 	struct mt76_dev *dev = phy->dev;
+#ifdef CONFIG_OF
 	struct device_node *np = dev->dev->of_node;
+	const u8 *mac = NULL;
 
-	of_get_mac_address(np, phy->macaddr);
+	if (np)
+		mac = of_get_mac_address(np);
+	if (!IS_ERR_OR_NULL(mac))
+		ether_addr_copy(phy->macaddr, mac);
+#endif
 
 	if (!is_valid_ether_addr(phy->macaddr)) {
 		eth_random_addr(phy->macaddr);
diff --git a/mac80211.c b/mac80211.c
index 8540738..76cb08b 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) { \
@@ -563,47 +562,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
@@ -1748,21 +1706,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
	struct ieee80211_hw *hw = phy->hw;
diff --git a/mcu.c b/mcu.c
index a8cafa3..fa4b054 100644
--- a/mcu.c
+++ b/mcu.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include <linux/moduleparam.h>
 
 struct sk_buff *
 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
diff --git a/mt76.h b/mt76.h
index a4cf9b6..35bf19f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -210,7 +210,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1439,7 +1439,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1551,25 +1550,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7615/mcu.c b/mt7615/mcu.c
index 8d745c9..86061e9 100644
--- a/mt7615/mcu.c
+++ b/mt7615/mcu.c
@@ -10,6 +10,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 static bool prefer_offload_fw = true;
 module_param(prefer_offload_fw, bool, 0644);
diff --git a/mt76_connac.h b/mt76_connac.h
index 22878f0..4560ab7 100644
--- a/mt76_connac.h
+++ b/mt76_connac.h
@@ -56,7 +56,6 @@ enum {
	CMD_CBW_10MHZ,
	CMD_CBW_5MHZ,
	CMD_CBW_8080MHZ,
-	CMD_CBW_320MHZ,
 
	CMD_HE_MCS_BW80 = 0,
	CMD_HE_MCS_BW160,
@@ -264,7 +263,6 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
		[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
		[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
		[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
-		[NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
	};
 
	if (chandef->width >= ARRAY_SIZE(width_to_bw))
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index cd6ce3c..a558c68 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -4,6 +4,7 @@
 #include <linux/firmware.h>
 #include "mt76_connac2_mac.h"
 #include "mt76_connac_mcu.h"
+#include <linux/module.h>
 
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
 {
@@ -1332,40 +1333,6 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
 
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-				enum nl80211_band band)
-{
-	const struct ieee80211_sta_eht_cap *eht_cap;
-	struct ieee80211_supported_band *sband;
-	u8 mode = 0;
-
-	if (band == NL80211_BAND_6GHZ)
-		mode |= PHY_MODE_AX_6G;
-
-	sband = phy->hw->wiphy->bands[band];
-	eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
-
-	if (!eht_cap || !eht_cap->has_eht)
-		return mode;
-
-	switch (band) {
-	case NL80211_BAND_6GHZ:
-		mode |= PHY_MODE_BE_6G;
-		break;
-	case NL80211_BAND_5GHZ:
-		mode |= PHY_MODE_BE_5G;
-		break;
-	case NL80211_BAND_2GHZ:
-		mode |= PHY_MODE_BE_24G;
-		break;
-	default:
-		break;
-	}
-
-	return mode;
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
-
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 {
@@ -1378,18 +1345,6 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
 
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
-{
-	enum nl80211_band band = phy->chandef.chan->band;
-	struct ieee80211_supported_band *sband;
-
-	sband = phy->hw->wiphy->bands[band];
-
-	return ieee80211_get_eht_iftype_cap(sband, vif->type);
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
-
 #define DEFAULT_HE_PE_DURATION		4
 #define DEFAULT_HE_DURATION_RTS_THRES	1023
 static void
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index fe729bb..bd0bf4b 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -1887,12 +1887,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
 
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
 u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    enum nl80211_band band, struct ieee80211_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-				enum nl80211_band band);
 
 int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
			    struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/mt7915/main.c b/mt7915/main.c
index 86f794f..e403cd8 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1391,22 +1391,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
-
-	memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1434,7 +1431,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
		.idx = mvif->mt76.idx,
	};
	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
	mutex_lock(&dev->mt76.mutex);
 
@@ -1546,12 +1543,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
		return;
 
	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index a34b75d..50b49e5 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -6,6 +6,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 #define fw_name(_dev, name, ...) ({ \
	char *_fw; \
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index fc7ace6..8d92e76 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -570,9 +570,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
	struct mt7915_dev *dev;
+	u32 length;
	int i;
 
	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
	for (i = 0; i < dev->mt76.rx_token_size; i++) {
		struct mt76_txwi_cache *t;
 
@@ -580,7 +584,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
		if (!t || !t->ptr)
			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
+		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(t->ptr), get_order(length));
		t->ptr = NULL;
 
		mt76_put_rxwi(&dev->mt76, t);
@@ -592,38 +598,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
+	u32 length;
+	int i;
 
	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		struct page *page;
		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(&dev->mt76);
		if (!t)
			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(&dev->mt76, t);
			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+		ptr = page_address(page);
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
+			goto unmap;
+		}
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
			goto unmap;
		}
 
@@ -635,8 +650,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
	mt7915_mmio_wed_release_rx_buf(wed);
	return -ENOMEM;
 }
diff --git a/mt7921/main.c b/mt7921/main.c
index 87067ac..022cd34 100644
--- a/mt7921/main.c
+++ b/mt7921/main.c
@@ -1087,34 +1087,17 @@ static void
 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       u32 sset, u8 *data)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
	if (sset != ETH_SS_STATS)
		return;
 
	memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
-
-	if (mt76_is_sdio(&dev->mt76))
-		return;
-
-	data += sizeof(mt7921_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 static int
 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 int sset)
 {
-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
-
-	if (sset != ETH_SS_STATS)
-		return 0;
-
-	if (mt76_is_sdio(&dev->mt76))
-		return ARRAY_SIZE(mt7921_gstrings_stats);
-
-	return ARRAY_SIZE(mt7921_gstrings_stats) +
-	       page_pool_ethtool_stats_get_count();
+	return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
 }
 
 static void
@@ -1134,7 +1117,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ethtool_stats *stats, u64 *data)
 {
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
-	int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
	struct mt7921_phy *phy = mt7921_hw_phy(hw);
	struct mt7921_dev *dev = phy->dev;
	struct mt76_mib_stats *mib = &phy->mib;
@@ -1190,14 +1172,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		return;
 
	ei += wi.worker_stat_count;
-
-	if (!mt76_is_sdio(&dev->mt76)) {
-		mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-		stats_size += page_pool_ethtool_stats_get_count();
-	}
-
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
+	if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+		dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+			ei, ARRAY_SIZE(mt7921_gstrings_stats));
 }
 
 static u64
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
	int i;
 
	for (i = 0; i < nsgs; i++) {
+		struct page *page;
		void *data;
		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}
 
	if (i < nsgs) {
		int j;
 
		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
	if (err)
		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
	int i;
 
	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		len -= data_len;
		nsgs++;
	}
-
-	skb_mark_for_recycle(skb);
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
	int i;
 
	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0
