From ed118d36cb5185ef0bc31725c973998edfc6969f Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sat, 1 Apr 2023 08:18:17 +0800
Subject: [PATCH 0999/1040] wifi: mt76: mt7915: build pass for Linux Kernel 5.4
 fixes

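Linux 5.4 lacks several core APIs that mt76 now depends on, so adapt
the driver to their older equivalents. Roughly, the changes below do
the following:

- RX buffers: replace the page_pool fragment allocator
  (mt76_get_page_pool_buf()/mt76_put_page_pool_buf()) with a per-queue
  struct page_frag_cache plus explicit dma_map_single()/
  dma_unmap_single() calls, and drop mt76_create_page_pool() along
  with the page_pool ethtool stats.
- skbs: use build_skb() instead of napi_build_skb() and drop
  skb_mark_for_recycle().
- debugfs: compile out the dev_set_threaded() call; threaded NAPI is
  not available on this kernel.
- EEPROM: use the pre-5.13 of_get_mac_address(), which returns the
  address instead of filling a caller buffer:

      const void *of_get_mac_address(struct device_node *np);

- Drop the EHT/320 MHz helpers (mt76_connac_get_phy_mode_ext(),
  mt76_connac_get_eht_phy_cap(), CMD_CBW_320MHZ), which need mac80211
  support this tree does not have.
- Include <linux/moduleparam.h>/<linux/module.h> explicitly where
  module_param() is used.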
---
 debugfs.c         |  2 ++
 dma.c             | 74 ++++++++++++++++++++++++-----------------------
 eeprom.c          |  8 ++++-
 mac80211.c        | 57 ------------------------------------
 mcu.c             |  1 +
 mt76.h            | 22 +-------------
 mt7615/mcu.c      |  1 +
 mt76_connac.h     |  2 --
 mt76_connac_mcu.c | 47 +-----------------------------
 mt76_connac_mcu.h |  4 ---
 mt7915/main.c     | 25 +++++++---------
 mt7915/mcu.c      |  1 +
 mt7915/mmio.c     | 55 +++++++++++++++++++++--------------
 usb.c             | 43 +++++++++++++--------------
 14 files changed, 118 insertions(+), 224 deletions(-)

diff --git a/debugfs.c b/debugfs.c
index c4649ba..1c8328d 100644
--- a/debugfs.c
+++ b/debugfs.c
@@ -33,8 +33,10 @@ mt76_napi_threaded_set(void *data, u64 val)
 	if (!mt76_is_mmio(dev))
 		return -EOPNOTSUPP;
 
+#if 0 /* disable in backport 5.15 */
 	if (dev->napi_dev.threaded != val)
 		return dev_set_threaded(&dev->napi_dev, val);
+#endif
 
 	return 0;
 }
39diff --git a/dma.c b/dma.c
developer0443cd32023-09-19 14:11:49 +080040index 643e18e..24b44e7 100644
developer6100db22023-04-05 13:22:26 +080041--- a/dma.c
42+++ b/dma.c
developer0443cd32023-09-19 14:11:49 +080043@@ -178,7 +178,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
developer6100db22023-04-05 13:22:26 +080044 local_bh_disable();
45 while ((t = __mt76_get_rxwi(dev)) != NULL) {
46 if (t->ptr)
47- mt76_put_page_pool_buf(t->ptr, false);
48+ skb_free_frag(t->ptr);
49 kfree(t);
50 }
51 local_bh_enable();
developer0443cd32023-09-19 14:11:49 +080052@@ -411,9 +411,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer6100db22023-04-05 13:22:26 +080053 if (!t)
54 return NULL;
55
56- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
57- SKB_WITH_OVERHEAD(q->buf_size),
58- page_pool_get_dma_dir(q->page_pool));
59+ dma_unmap_single(dev->dma_dev, t->dma_addr,
60+ SKB_WITH_OVERHEAD(q->buf_size),
61+ DMA_FROM_DEVICE);
62
63 buf = t->ptr;
64 t->dma_addr = 0;
developer0443cd32023-09-19 14:11:49 +080065@@ -432,9 +432,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer6100db22023-04-05 13:22:26 +080066 } else {
67 buf = e->buf;
68 e->buf = NULL;
69- dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
70- SKB_WITH_OVERHEAD(q->buf_size),
71- page_pool_get_dma_dir(q->page_pool));
72+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
73+ SKB_WITH_OVERHEAD(q->buf_size),
74+ DMA_FROM_DEVICE);
75 }
76
77 return buf;
developer0443cd32023-09-19 14:11:49 +080078@@ -594,11 +594,11 @@ free_skb:
developer6100db22023-04-05 13:22:26 +080079 }
80
81 static int
82-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
83- bool allow_direct)
84+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
85 {
86 int len = SKB_WITH_OVERHEAD(q->buf_size);
87- int frames = 0;
88+ int frames = 0, offset = q->buf_offset;
89+ dma_addr_t addr;
90
91 if (!q->ndesc)
92 return 0;
developer0443cd32023-09-19 14:11:49 +080093@@ -606,25 +606,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
developer6100db22023-04-05 13:22:26 +080094 spin_lock_bh(&q->lock);
95
96 while (q->queued < q->ndesc - 1) {
97- enum dma_data_direction dir;
98 struct mt76_queue_buf qbuf;
99- dma_addr_t addr;
100- int offset;
101- void *buf;
102+ void *buf = NULL;
103
104- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
105+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
106 if (!buf)
107 break;
108
109- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
110- dir = page_pool_get_dma_dir(q->page_pool);
111- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
112+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
113+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
114+ skb_free_frag(buf);
115+ break;
116+ }
117
118- qbuf.addr = addr + q->buf_offset;
119- qbuf.len = len - q->buf_offset;
120+ qbuf.addr = addr + offset;
121+ qbuf.len = len - offset;
122 qbuf.skip_unmap = false;
123 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
124- mt76_put_page_pool_buf(buf, allow_direct);
125+ dma_unmap_single(dev->dma_dev, addr, len,
126+ DMA_FROM_DEVICE);
127+ skb_free_frag(buf);
128 break;
129 }
130 frames++;
developer0443cd32023-09-19 14:11:49 +0800131@@ -668,7 +669,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
developer6100db22023-04-05 13:22:26 +0800132 /* WED txfree queue needs ring to be initialized before setup */
133 q->flags = 0;
134 mt76_dma_queue_reset(dev, q);
135- mt76_dma_rx_fill(dev, q, false);
136+ mt76_dma_rx_fill(dev, q);
137 q->flags = flags;
138
139 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
developer0443cd32023-09-19 14:11:49 +0800140@@ -716,10 +717,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developer6100db22023-04-05 13:22:26 +0800141 if (!q->entry)
142 return -ENOMEM;
143
144- ret = mt76_create_page_pool(dev, q);
145- if (ret)
146- return ret;
147-
148 ret = mt76_dma_wed_setup(dev, q, false);
149 if (ret)
150 return ret;
developer0443cd32023-09-19 14:11:49 +0800151@@ -733,6 +730,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developer6100db22023-04-05 13:22:26 +0800152 static void
153 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
154 {
155+ struct page *page;
156 void *buf;
157 bool more;
158
developer0443cd32023-09-19 14:11:49 +0800159@@ -746,7 +744,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
developer6100db22023-04-05 13:22:26 +0800160 if (!buf)
161 break;
162
163- mt76_put_page_pool_buf(buf, false);
164+ skb_free_frag(buf);
165 } while (1);
166
167 if (q->rx_head) {
developer0443cd32023-09-19 14:11:49 +0800168@@ -755,6 +753,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
developer6100db22023-04-05 13:22:26 +0800169 }
170
171 spin_unlock_bh(&q->lock);
172+
173+ if (!q->rx_page.va)
174+ return;
175+
176+ page = virt_to_page(q->rx_page.va);
177+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
178+ memset(&q->rx_page, 0, sizeof(q->rx_page));
179 }
180
181 static void
developer0443cd32023-09-19 14:11:49 +0800182@@ -775,7 +780,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
developer6100db22023-04-05 13:22:26 +0800183 mt76_dma_wed_setup(dev, q, true);
184 if (q->flags != MT_WED_Q_TXFREE) {
185 mt76_dma_sync_idx(dev, q);
186- mt76_dma_rx_fill(dev, q, false);
187+ mt76_dma_rx_fill(dev, q);
188 }
189 }
190
developer0443cd32023-09-19 14:11:49 +0800191@@ -793,7 +798,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
developer6100db22023-04-05 13:22:26 +0800192
193 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
194 } else {
195- mt76_put_page_pool_buf(data, true);
196+ skb_free_frag(data);
197 }
198
199 if (more)
developer0443cd32023-09-19 14:11:49 +0800200@@ -861,12 +866,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
developer6100db22023-04-05 13:22:26 +0800201 !(dev->drv->rx_check(dev, data, len)))
202 goto free_frag;
203
204- skb = napi_build_skb(data, q->buf_size);
205+ skb = build_skb(data, q->buf_size);
206 if (!skb)
207 goto free_frag;
208
209 skb_reserve(skb, q->buf_offset);
210- skb_mark_for_recycle(skb);
211
212 *(u32 *)skb->cb = info;
213
developer0443cd32023-09-19 14:11:49 +0800214@@ -882,10 +886,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
developer6100db22023-04-05 13:22:26 +0800215 continue;
216
217 free_frag:
218- mt76_put_page_pool_buf(data, true);
219+ skb_free_frag(data);
220 }
221
222- mt76_dma_rx_fill(dev, q, true);
223+ mt76_dma_rx_fill(dev, q);
224 return done;
225 }
226
developer0443cd32023-09-19 14:11:49 +0800227@@ -930,7 +934,7 @@ mt76_dma_init(struct mt76_dev *dev,
developer6100db22023-04-05 13:22:26 +0800228
229 mt76_for_each_q_rx(dev, i) {
230 netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
231- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
232+ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
233 napi_enable(&dev->napi[i]);
234 }
235
developer0443cd32023-09-19 14:11:49 +0800236@@ -981,8 +985,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
developer6100db22023-04-05 13:22:26 +0800237
238 netif_napi_del(&dev->napi[i]);
239 mt76_dma_rx_cleanup(dev, q);
240-
241- page_pool_destroy(q->page_pool);
242 }
243
244 mt76_free_pending_txwi(dev);
diff --git a/eeprom.c b/eeprom.c
index 750e031..ff8dc93 100644
--- a/eeprom.c
+++ b/eeprom.c
@@ -161,9 +161,15 @@ void
 mt76_eeprom_override(struct mt76_phy *phy)
 {
 	struct mt76_dev *dev = phy->dev;
+#ifdef CONFIG_OF
 	struct device_node *np = dev->dev->of_node;
+	const u8 *mac = NULL;
 
-	of_get_mac_address(np, phy->macaddr);
+	if (np)
+		mac = of_get_mac_address(np);
+	if (!IS_ERR_OR_NULL(mac))
+		ether_addr_copy(phy->macaddr, mac);
+#endif
 
 	if (!is_valid_ether_addr(phy->macaddr)) {
 		eth_random_addr(phy->macaddr);
diff --git a/mac80211.c b/mac80211.c
index aaaf6a9..647f74c 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) { \
@@ -567,47 +566,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
 
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
-{
-	struct page_pool_params pp_params = {
-		.order = 0,
-		.flags = PP_FLAG_PAGE_FRAG,
-		.nid = NUMA_NO_NODE,
-		.dev = dev->dma_dev,
-	};
-	int idx = q - dev->q_rx;
-
-	switch (idx) {
-	case MT_RXQ_MAIN:
-	case MT_RXQ_BAND1:
-	case MT_RXQ_BAND2:
-		pp_params.pool_size = 256;
-		break;
-	default:
-		pp_params.pool_size = 16;
-		break;
-	}
-
-	if (mt76_is_mmio(dev)) {
-		/* rely on page_pool for DMA mapping */
-		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-		pp_params.dma_dir = DMA_FROM_DEVICE;
-		pp_params.max_len = PAGE_SIZE;
-		pp_params.offset = 0;
-	}
-
-	q->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(q->page_pool)) {
-		int err = PTR_ERR(q->page_pool);
-
-		q->page_pool = NULL;
-		return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
-
 struct mt76_dev *
 mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
@@ -1805,21 +1763,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
 }
 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
-{
-#ifdef CONFIG_PAGE_POOL_STATS
-	struct page_pool_stats stats = {};
-	int i;
-
-	mt76_for_each_q_rx(dev, i)
-		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
-
-	page_pool_ethtool_stats_get(data, &stats);
-	*index += page_pool_ethtool_stats_get_count();
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
-
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
 {
 	struct ieee80211_hw *hw = phy->hw;
diff --git a/mcu.c b/mcu.c
index a8cafa3..fa4b054 100644
--- a/mcu.c
+++ b/mcu.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include <linux/moduleparam.h>
 
 struct sk_buff *
 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
diff --git a/mt76.h b/mt76.h
index 0d864fe..97ced84 100644
--- a/mt76.h
+++ b/mt76.h
@@ -210,7 +210,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_pool *page_pool;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
@@ -1457,7 +1457,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
@@ -1569,25 +1568,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
-{
-	struct page *page = virt_to_head_page(buf);
-
-	page_pool_put_full_page(page->pp, page, allow_direct);
-}
-
-static inline void *
-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
-		return NULL;
-
-	return page_address(page) + *offset;
-}
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
diff --git a/mt7615/mcu.c b/mt7615/mcu.c
index 955974a..db337aa 100644
--- a/mt7615/mcu.c
+++ b/mt7615/mcu.c
@@ -10,6 +10,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 static bool prefer_offload_fw = true;
 module_param(prefer_offload_fw, bool, 0644);
diff --git a/mt76_connac.h b/mt76_connac.h
index 1f29d8c..6f5cf18 100644
--- a/mt76_connac.h
+++ b/mt76_connac.h
@@ -56,7 +56,6 @@ enum {
 	CMD_CBW_10MHZ,
 	CMD_CBW_5MHZ,
 	CMD_CBW_8080MHZ,
-	CMD_CBW_320MHZ,
 
 	CMD_HE_MCS_BW80 = 0,
 	CMD_HE_MCS_BW160,
@@ -270,7 +269,6 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
 		[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
 		[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
 		[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
-		[NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
 	};
 
 	if (chandef->width >= ARRAY_SIZE(width_to_bw))
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index 3aef23a..f06a81d 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -4,6 +4,7 @@
 #include <linux/firmware.h>
 #include "mt76_connac2_mac.h"
 #include "mt76_connac_mcu.h"
+#include <linux/module.h>
 
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
 {
@@ -1346,40 +1347,6 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
 
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-				enum nl80211_band band)
-{
-	const struct ieee80211_sta_eht_cap *eht_cap;
-	struct ieee80211_supported_band *sband;
-	u8 mode = 0;
-
-	if (band == NL80211_BAND_6GHZ)
-		mode |= PHY_MODE_AX_6G;
-
-	sband = phy->hw->wiphy->bands[band];
-	eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
-
-	if (!eht_cap || !eht_cap->has_eht)
-		return mode;
-
-	switch (band) {
-	case NL80211_BAND_6GHZ:
-		mode |= PHY_MODE_BE_6G;
-		break;
-	case NL80211_BAND_5GHZ:
-		mode |= PHY_MODE_BE_5G;
-		break;
-	case NL80211_BAND_2GHZ:
-		mode |= PHY_MODE_BE_24G;
-		break;
-	default:
-		break;
-	}
-
-	return mode;
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
-
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 {
@@ -1395,18 +1362,6 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
 
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
-{
-	enum nl80211_band band = phy->chandef.chan->band;
-	struct ieee80211_supported_band *sband;
-
-	sband = phy->hw->wiphy->bands[band];
-
-	return ieee80211_get_eht_iftype_cap(sband, vif->type);
-}
-EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
-
 #define DEFAULT_HE_PE_DURATION		4
 #define DEFAULT_HE_DURATION_RTS_THRES	1023
 static void
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index 6064973..ddf901a 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -1928,12 +1928,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
 
 const struct ieee80211_sta_he_cap *
 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
-const struct ieee80211_sta_eht_cap *
-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
 u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    enum nl80211_band band, struct ieee80211_sta *sta);
-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
-				enum nl80211_band band);
 
 int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
			    struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/mt7915/main.c b/mt7915/main.c
index 96336b6..95ad05d 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1405,22 +1405,20 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   u32 sset, u8 *data)
 {
-	if (sset != ETH_SS_STATS)
-		return;
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *mt7915_gstrings_stats,
+		       sizeof(mt7915_gstrings_stats));
 
-	memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-	data += sizeof(mt7915_gstrings_stats);
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 static
 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, int sset)
 {
-	if (sset != ETH_SS_STATS)
-		return 0;
+	if (sset == ETH_SS_STATS)
+		return MT7915_SSTATS_LEN;
 
-	return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+	return 0;
 }
 
 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1448,7 +1446,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		.idx = mvif->mt76.idx,
 	};
 	/* See mt7915_ampdu_stat_read_phy, etc */
-	int i, ei = 0, stats_size;
+	int i, ei = 0;
 
 	mutex_lock(&dev->mt76.mutex);
 
@@ -1560,12 +1558,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
 		return;
 
 	ei += wi.worker_stat_count;
-
-	mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
-
-	stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
-	if (ei != stats_size)
-		dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+	if (ei != MT7915_SSTATS_LEN)
+		dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+			ei, (int)MT7915_SSTATS_LEN);
 }
 
 static void
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index 9a79119..e9d7f20 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -6,6 +6,7 @@
 #include "mcu.h"
 #include "mac.h"
 #include "eeprom.h"
+#include <linux/moduleparam.h>
 
 #define fw_name(_dev, name, ...)	({			\
	char *_fw;						\
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 5509661..10f4e66 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -608,9 +608,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 {
 	struct mt7915_dev *dev;
+	u32 length;
 	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
+
 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
 		struct mt76_txwi_cache *t;
 
@@ -618,7 +622,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 		if (!t || !t->ptr)
 			continue;
 
-		mt76_put_page_pool_buf(t->ptr, false);
+		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
+				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+		__free_pages(virt_to_page(t->ptr), get_order(length));
 		t->ptr = NULL;
 
 		mt76_put_rxwi(&dev->mt76, t);
@@ -630,38 +636,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 {
 	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
 	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
+	u32 length;
+	int i;
 
 	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
+	length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
+				sizeof(struct skb_shared_info));
 
 	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
+		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
+		dma_addr_t phy_addr;
+		struct page *page;
 		int token;
-		void *buf;
+		void *ptr;
 
-		t = mt76_get_rxwi(&dev->mt76);
 		if (!t)
 			goto unmap;
 
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
+		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+		if (!page) {
+			mt76_put_rxwi(&dev->mt76, t);
 			goto unmap;
+		}
 
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+		ptr = page_address(page);
+		phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
+					  wed->wlan.rx_size,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
+			goto unmap;
+		}
 
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+		desc->buf0 = cpu_to_le32(phy_addr);
+		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
 		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			__free_pages(page, get_order(length));
+			mt76_put_rxwi(&dev->mt76, t);
 			goto unmap;
 		}
 
@@ -673,8 +688,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 	return 0;
 
 unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
 	mt7915_mmio_wed_release_rx_buf(wed);
 	return -ENOMEM;
 }
diff --git a/usb.c b/usb.c
index 5e5c7bf..3e28171 100644
--- a/usb.c
+++ b/usb.c
@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
 
 static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
-		 int nsgs)
+		 int nsgs, gfp_t gfp)
 {
 	int i;
 
 	for (i = 0; i < nsgs; i++) {
+		struct page *page;
 		void *data;
 		int offset;
 
-		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 		if (!data)
 			break;
 
-		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
-			    offset);
+		page = virt_to_head_page(data);
+		offset = data - page_address(page);
+		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 	}
 
 	if (i < nsgs) {
 		int j;
 
 		for (j = nsgs; j < urb->num_sgs; j++)
-			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
+			skb_free_frag(sg_virt(&urb->sg[j]));
 		urb->num_sgs = i;
 	}
 
@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 
 static int
 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
-		struct urb *urb, int nsgs)
+		struct urb *urb, int nsgs, gfp_t gfp)
 {
 	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
-	int offset;
 
 	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
-		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
+		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 
 	urb->transfer_buffer_length = q->buf_size;
-	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 
 	return urb->transfer_buffer ? 0 : -ENOMEM;
 }
@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 	if (err)
 		return err;
 
-	return mt76u_refill_rx(dev, q, e->urb, sg_size);
+	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 }
 
 static void mt76u_urb_free(struct urb *urb)
@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
 	int i;
 
 	for (i = 0; i < urb->num_sgs; i++)
-		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
+		skb_free_frag(sg_virt(&urb->sg[i]));
 
 	if (urb->transfer_buffer)
-		mt76_put_page_pool_buf(urb->transfer_buffer, false);
+		skb_free_frag(urb->transfer_buffer);
 
 	usb_free_urb(urb);
 }
@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-
-	skb_mark_for_recycle(skb);
 	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 		if (count > 0) {
-			err = mt76u_refill_rx(dev, q, urb, count);
+			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 			if (err < 0)
 				break;
 		}
@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i, err;
 
-	err = mt76_create_page_pool(dev, q);
-	if (err)
-		return err;
-
 	spin_lock_init(&q->lock);
 	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 static void
 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++) {
@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		mt76u_urb_free(q->entry[i].urb);
 		q->entry[i].urb = NULL;
 	}
-	page_pool_destroy(q->page_pool);
-	q->page_pool = NULL;
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_free_rx(struct mt76_dev *dev)
-- 
2.18.0
