From 2a3d2190a9728a80563463420a329a732ac38a8e Mon Sep 17 00:00:00 2001
From: Evelyn Tsai <evelyn.tsai@mediatek.com>
Date: Sat, 1 Apr 2023 08:18:17 +0800
Subject: [PATCH] wifi: mt76: mt7996: for build pass

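Adjust mt76 to the mac80211 API level of the 5.15 backport so the
driver builds: RX buffers go back to page_frag_cache instead of
page_pool, the deflink/vif->cfg accessors and the EHT helpers are
replaced with their older equivalents, and the previous
netif_napi_add() / ieee80211_beacon_get_template() signatures are
restored. The NAPI threaded debugfs toggle is compiled out for the
5.15 backport.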
---
 debugfs.c         |   2 +
 dma.c             |  76 +++++++++++----------
 eeprom.c          |   8 ++-
 mac80211.c        |  61 +----------------
 mcu.c             |   1 +
 mt76.h            |  22 +-----
 mt7615/dma.c      |   4 +-
 mt7615/main.c     |   6 +-
 mt7615/mcu.c      |   9 +--
 mt76_connac.h     |   2 -
 mt76_connac_mcu.c | 155 +++++++++++++++---------------------------------
 mt76_connac_mcu.h |   4 --
 mt76x02_mac.c     |   6 +-
 mt7915/debugfs.c  |   4 +-
 mt7915/dma.c      |   4 +-
 mt7915/init.c     |   3 +-
 mt7915/mac.c      |   2 +-
 mt7915/main.c     |  36 +++++-----
 mt7915/mcu.c      | 167 +++++++++++++++++++++++-----------------------
 mt7915/mmio.c     |  55 +++++++++------
 mt7921/main.c     |  31 ++-------
 tx.c              |  11 +--
 usb.c             |  43 ++++++------
 23 files changed, 286 insertions(+), 426 deletions(-)

32diff --git a/debugfs.c b/debugfs.c
33index 79064a4d..4a8e1864 100644
34--- a/debugfs.c
35+++ b/debugfs.c
36@@ -33,8 +33,10 @@ mt76_napi_threaded_set(void *data, u64 val)
37 if (!mt76_is_mmio(dev))
38 return -EOPNOTSUPP;
39
40+#if 0 /* disable in backport 5.15 */
41 if (dev->napi_dev.threaded != val)
42 return dev_set_threaded(&dev->napi_dev, val);
43+#endif
44
45 return 0;
46 }
47diff --git a/dma.c b/dma.c
48index df2ca73f..d4829376 100644
49--- a/dma.c
50+++ b/dma.c
51@@ -173,7 +173,7 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
52 local_bh_disable();
53 while ((t = __mt76_get_rxwi(dev)) != NULL) {
54 if (t->ptr)
55- mt76_put_page_pool_buf(t->ptr, false);
56+ skb_free_frag(t->ptr);
57 kfree(t);
58 }
59 local_bh_enable();
60@@ -409,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
61 if (!t)
62 return NULL;
63
64- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
65- SKB_WITH_OVERHEAD(q->buf_size),
66- page_pool_get_dma_dir(q->page_pool));
67+ dma_unmap_single(dev->dma_dev, t->dma_addr,
68+ SKB_WITH_OVERHEAD(q->buf_size),
69+ DMA_FROM_DEVICE);
70
71 buf = t->ptr;
72 t->dma_addr = 0;
73@@ -430,9 +430,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
74 } else {
75 buf = e->buf;
76 e->buf = NULL;
77- dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
78- SKB_WITH_OVERHEAD(q->buf_size),
79- page_pool_get_dma_dir(q->page_pool));
80+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
81+ SKB_WITH_OVERHEAD(q->buf_size),
82+ DMA_FROM_DEVICE);
83 }
84
85 return buf;
86@@ -584,11 +584,11 @@ free_skb:
87 }
88
89 static int
90-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
91- bool allow_direct)
92+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
93 {
94 int len = SKB_WITH_OVERHEAD(q->buf_size);
95- int frames = 0;
96+ int frames = 0, offset = q->buf_offset;
97+ dma_addr_t addr;
98
99 if (!q->ndesc)
100 return 0;
101@@ -596,25 +596,26 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
102 spin_lock_bh(&q->lock);
103
104 while (q->queued < q->ndesc - 1) {
105- enum dma_data_direction dir;
106 struct mt76_queue_buf qbuf;
107- dma_addr_t addr;
108- int offset;
109- void *buf;
110+ void *buf = NULL;
111
112- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
113+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
114 if (!buf)
115 break;
116
117- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
118- dir = page_pool_get_dma_dir(q->page_pool);
119- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
120+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
121+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
122+ skb_free_frag(buf);
123+ break;
124+ }
125
126- qbuf.addr = addr + q->buf_offset;
127- qbuf.len = len - q->buf_offset;
128+ qbuf.addr = addr + offset;
129+ qbuf.len = len - offset;
130 qbuf.skip_unmap = false;
131 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
132- mt76_put_page_pool_buf(buf, allow_direct);
133+ dma_unmap_single(dev->dma_dev, addr, len,
134+ DMA_FROM_DEVICE);
135+ skb_free_frag(buf);
136 break;
137 }
138 frames++;
139@@ -658,7 +659,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
140 /* WED txfree queue needs ring to be initialized before setup */
141 q->flags = 0;
142 mt76_dma_queue_reset(dev, q);
143- mt76_dma_rx_fill(dev, q, false);
144+ mt76_dma_rx_fill(dev, q);
145 q->flags = flags;
146
147 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
148@@ -706,10 +707,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
149 if (!q->entry)
150 return -ENOMEM;
151
152- ret = mt76_create_page_pool(dev, q);
153- if (ret)
154- return ret;
155-
156 ret = mt76_dma_wed_setup(dev, q, false);
157 if (ret)
158 return ret;
159@@ -723,6 +720,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
160 static void
161 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
162 {
163+ struct page *page;
164 void *buf;
165 bool more;
166
167@@ -736,7 +734,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
168 if (!buf)
169 break;
170
171- mt76_put_page_pool_buf(buf, false);
172+ skb_free_frag(buf);
173 } while (1);
174
175 if (q->rx_head) {
176@@ -745,6 +743,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
177 }
178
179 spin_unlock_bh(&q->lock);
180+
181+ if (!q->rx_page.va)
182+ return;
183+
184+ page = virt_to_page(q->rx_page.va);
185+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
186+ memset(&q->rx_page, 0, sizeof(q->rx_page));
187 }
188
189 static void
190@@ -765,7 +770,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
191 mt76_dma_wed_setup(dev, q, true);
192 if (q->flags != MT_WED_Q_TXFREE) {
193 mt76_dma_sync_idx(dev, q);
194- mt76_dma_rx_fill(dev, q, false);
195+ mt76_dma_rx_fill(dev, q);
196 }
197 }
198
199@@ -783,7 +788,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
200
201 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
202 } else {
203- mt76_put_page_pool_buf(data, true);
204+ skb_free_frag(data);
205 }
206
207 if (more)
208@@ -851,12 +856,11 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
209 !(dev->drv->rx_check(dev, data, len)))
210 goto free_frag;
211
212- skb = napi_build_skb(data, q->buf_size);
213+ skb = build_skb(data, q->buf_size);
214 if (!skb)
215 goto free_frag;
216
217 skb_reserve(skb, q->buf_offset);
218- skb_mark_for_recycle(skb);
219
220 *(u32 *)skb->cb = info;
221
222@@ -872,10 +876,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
223 continue;
224
225 free_frag:
226- mt76_put_page_pool_buf(data, true);
227+ skb_free_frag(data);
228 }
229
230- mt76_dma_rx_fill(dev, q, true);
231+ mt76_dma_rx_fill(dev, q);
232 return done;
233 }
234
235@@ -919,8 +923,8 @@ mt76_dma_init(struct mt76_dev *dev,
236 init_completion(&dev->mmio.wed_reset_complete);
237
238 mt76_for_each_q_rx(dev, i) {
239- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
240- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
241+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
242+ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
243 napi_enable(&dev->napi[i]);
244 }
245
246@@ -971,8 +975,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
247
248 netif_napi_del(&dev->napi[i]);
249 mt76_dma_rx_cleanup(dev, q);
250-
251- page_pool_destroy(q->page_pool);
252 }
253
254 mt76_free_pending_txwi(dev);
255diff --git a/eeprom.c b/eeprom.c
256index ea54b7af..90d36c8d 100644
257--- a/eeprom.c
258+++ b/eeprom.c
259@@ -106,9 +106,15 @@ void
260 mt76_eeprom_override(struct mt76_phy *phy)
261 {
262 struct mt76_dev *dev = phy->dev;
263+#ifdef CONFIG_OF
264 struct device_node *np = dev->dev->of_node;
265+ const u8 *mac = NULL;
266
267- of_get_mac_address(np, phy->macaddr);
268+ if (np)
269+ mac = of_get_mac_address(np);
270+ if (!IS_ERR_OR_NULL(mac))
271+ ether_addr_copy(phy->macaddr, mac);
272+#endif
273
274 if (!is_valid_ether_addr(phy->macaddr)) {
275 eth_random_addr(phy->macaddr);
276diff --git a/mac80211.c b/mac80211.c
277index 87902f4b..46e35668 100644
278--- a/mac80211.c
279+++ b/mac80211.c
280@@ -4,7 +4,6 @@
281 */
282 #include <linux/sched.h>
283 #include <linux/of.h>
284-#include <net/page_pool.h>
285 #include "mt76.h"
286
287 #define CHAN2G(_idx, _freq) { \
288@@ -562,47 +561,6 @@ void mt76_unregister_phy(struct mt76_phy *phy)
289 }
290 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
291
292-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
293-{
294- struct page_pool_params pp_params = {
295- .order = 0,
296- .flags = PP_FLAG_PAGE_FRAG,
297- .nid = NUMA_NO_NODE,
298- .dev = dev->dma_dev,
299- };
300- int idx = q - dev->q_rx;
301-
302- switch (idx) {
303- case MT_RXQ_MAIN:
304- case MT_RXQ_BAND1:
305- case MT_RXQ_BAND2:
306- pp_params.pool_size = 256;
307- break;
308- default:
309- pp_params.pool_size = 16;
310- break;
311- }
312-
313- if (mt76_is_mmio(dev)) {
314- /* rely on page_pool for DMA mapping */
315- pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
316- pp_params.dma_dir = DMA_FROM_DEVICE;
317- pp_params.max_len = PAGE_SIZE;
318- pp_params.offset = 0;
319- }
320-
321- q->page_pool = page_pool_create(&pp_params);
322- if (IS_ERR(q->page_pool)) {
323- int err = PTR_ERR(q->page_pool);
324-
325- q->page_pool = NULL;
326- return err;
327- }
328-
329- return 0;
330-}
331-EXPORT_SYMBOL_GPL(mt76_create_page_pool);
332-
333 struct mt76_dev *
334 mt76_alloc_device(struct device *pdev, unsigned int size,
335 const struct ieee80211_ops *ops,
336@@ -1547,7 +1505,7 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
337 static void
338 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
339 {
340- if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
341+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
342 ieee80211_csa_finish(vif);
343 }
344
345@@ -1569,7 +1527,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
346 {
347 struct mt76_dev *dev = priv;
348
349- if (!vif->bss_conf.csa_active)
350+ if (!vif->csa_active)
351 return;
352
353 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
354@@ -1741,21 +1699,6 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
355 }
356 EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
357
358-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
359-{
360-#ifdef CONFIG_PAGE_POOL_STATS
361- struct page_pool_stats stats = {};
362- int i;
363-
364- mt76_for_each_q_rx(dev, i)
365- page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
366-
367- page_pool_ethtool_stats_get(data, &stats);
368- *index += page_pool_ethtool_stats_get_count();
369-#endif
370-}
371-EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
372-
373 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
374 {
375 struct ieee80211_hw *hw = phy->hw;
376diff --git a/mcu.c b/mcu.c
377index a8cafa39..fa4b0544 100644
378--- a/mcu.c
379+++ b/mcu.c
380@@ -4,6 +4,7 @@
381 */
382
383 #include "mt76.h"
384+#include <linux/moduleparam.h>
385
386 struct sk_buff *
387 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
388diff --git a/mt76.h b/mt76.h
389index 183b0fc5..856dacbc 100644
390--- a/mt76.h
391+++ b/mt76.h
392@@ -202,7 +202,7 @@ struct mt76_queue {
393
394 dma_addr_t desc_dma;
395 struct sk_buff *rx_head;
396- struct page_pool *page_pool;
397+ struct page_frag_cache rx_page;
398 };
399
400 struct mt76_mcu_ops {
401@@ -1319,7 +1319,6 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
402 return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
403 }
404
405-void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
406 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
407 struct mt76_sta_stats *stats, bool eht);
408 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
409@@ -1431,25 +1430,6 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
410 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
411 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
412 struct mt76_txwi_cache *r, dma_addr_t phys);
413-int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
414-static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
415-{
416- struct page *page = virt_to_head_page(buf);
417-
418- page_pool_put_full_page(page->pp, page, allow_direct);
419-}
420-
421-static inline void *
422-mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
423-{
424- struct page *page;
425-
426- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
427- if (!page)
428- return NULL;
429-
430- return page_address(page) + *offset;
431-}
432
433 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
434 {
435diff --git a/mt7615/dma.c b/mt7615/dma.c
436index f1914431..ec729dbe 100644
437--- a/mt7615/dma.c
438+++ b/mt7615/dma.c
439@@ -281,8 +281,8 @@ int mt7615_dma_init(struct mt7615_dev *dev)
440 if (ret < 0)
441 return ret;
442
443- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
444- mt7615_poll_tx);
445+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
446+ mt7615_poll_tx, NAPI_POLL_WEIGHT);
447 napi_enable(&dev->mt76.tx_napi);
448
449 mt76_poll(dev, MT_WPDMA_GLO_CFG,
450diff --git a/mt7615/main.c b/mt7615/main.c
451index ab4c1b44..8fb5b256 100644
452--- a/mt7615/main.c
453+++ b/mt7615/main.c
454@@ -474,7 +474,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
455
456 static int
457 mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
458- unsigned int link_id, u16 queue,
459+ u16 queue,
460 const struct ieee80211_tx_queue_params *params)
461 {
462 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
463@@ -556,7 +556,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
464 static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
465 struct ieee80211_vif *vif,
466 struct ieee80211_bss_conf *info,
467- u64 changed)
468+ u32 changed)
469 {
470 struct mt7615_dev *dev = mt7615_hw_dev(hw);
471 struct mt7615_phy *phy = mt7615_hw_phy(hw);
472@@ -599,7 +599,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
473 }
474
475 if (changed & BSS_CHANGED_ASSOC)
476- mt7615_mac_set_beacon_filter(phy, vif, vif->cfg.assoc);
477+ mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
478
479 mt7615_mutex_release(dev);
480 }
481diff --git a/mt7615/mcu.c b/mt7615/mcu.c
482index eea398c7..39e81d26 100644
483--- a/mt7615/mcu.c
484+++ b/mt7615/mcu.c
485@@ -10,6 +10,7 @@
486 #include "mcu.h"
487 #include "mac.h"
488 #include "eeprom.h"
489+#include <linux/moduleparam.h>
490
491 static bool prefer_offload_fw = true;
492 module_param(prefer_offload_fw, bool, 0644);
493@@ -352,7 +353,7 @@ out:
494 static void
495 mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
496 {
497- if (vif->bss_conf.csa_active)
498+ if (vif->csa_active)
499 ieee80211_csa_finish(vif);
500 }
501
502@@ -698,7 +699,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
503 if (!enable)
504 goto out;
505
506- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
507+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
508 if (!skb)
509 return -EINVAL;
510
511@@ -1073,7 +1074,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
512 if (!enable)
513 goto out;
514
515- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
516+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
517 if (!skb)
518 return -EINVAL;
519
520@@ -2524,7 +2525,7 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
521 u8 pad;
522 } req = {
523 .bss_idx = mvif->mt76.idx,
524- .aid = cpu_to_le16(vif->cfg.aid),
525+ .aid = cpu_to_le16(vif->bss_conf.aid),
526 .dtim_period = vif->bss_conf.dtim_period,
527 .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
528 };
529diff --git a/mt76_connac.h b/mt76_connac.h
530index b339c50b..2ee9a3c8 100644
531--- a/mt76_connac.h
532+++ b/mt76_connac.h
533@@ -42,7 +42,6 @@ enum {
534 CMD_CBW_10MHZ,
535 CMD_CBW_5MHZ,
536 CMD_CBW_8080MHZ,
537- CMD_CBW_320MHZ,
538
539 CMD_HE_MCS_BW80 = 0,
540 CMD_HE_MCS_BW160,
541@@ -240,7 +239,6 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
542 [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
543 [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
544 [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
545- [NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
546 };
547
548 if (chandef->width >= ARRAY_SIZE(width_to_bw))
549diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
550index efb9bfaa..fd14221a 100644
551--- a/mt76_connac_mcu.c
552+++ b/mt76_connac_mcu.c
553@@ -4,6 +4,7 @@
554 #include <linux/firmware.h>
555 #include "mt76_connac2_mac.h"
556 #include "mt76_connac_mcu.h"
557+#include <linux/module.h>
558
559 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
560 {
561@@ -196,7 +197,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif)
562 */
563 } req = {
564 .bss_idx = mvif->idx,
565- .ps_state = vif->cfg.ps ? 2 : 0,
566+ .ps_state = vif->bss_conf.ps ? 2 : 0,
567 };
568
569 if (vif->type != NL80211_IFTYPE_STATION)
570@@ -407,7 +408,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
571 else
572 conn_type = CONNECTION_INFRA_AP;
573 basic->conn_type = cpu_to_le32(conn_type);
574- basic->aid = cpu_to_le16(vif->cfg.aid);
575+ basic->aid = cpu_to_le16(vif->bss_conf.aid);
576 break;
577 case NL80211_IFTYPE_ADHOC:
578 basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
579@@ -551,7 +552,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
580
581 if (sta) {
582 if (vif->type == NL80211_IFTYPE_STATION)
583- generic->partial_aid = cpu_to_le16(vif->cfg.aid);
584+ generic->partial_aid = cpu_to_le16(vif->bss_conf.aid);
585 else
586 generic->partial_aid = cpu_to_le16(sta->aid);
587 memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
588@@ -597,14 +598,14 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
589 vif->type != NL80211_IFTYPE_STATION)
590 return;
591
592- if (!sta->deflink.agg.max_amsdu_len)
593+ if (!sta->max_amsdu_len)
594 return;
595
596 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
597 amsdu = (struct sta_rec_amsdu *)tlv;
598 amsdu->max_amsdu_num = 8;
599 amsdu->amsdu_en = true;
600- amsdu->max_mpdu_size = sta->deflink.agg.max_amsdu_len >=
601+ amsdu->max_mpdu_size = sta->max_amsdu_len >=
602 IEEE80211_MAX_MPDU_LEN_VHT_7991;
603
604 wcid->amsdu = true;
605@@ -615,7 +616,7 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
606 static void
607 mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
608 {
609- struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
610+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
611 struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
612 struct sta_rec_he *he;
613 struct tlv *tlv;
614@@ -703,7 +704,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
615
616 he->he_cap = cpu_to_le32(cap);
617
618- switch (sta->deflink.bandwidth) {
619+ switch (sta->bandwidth) {
620 case IEEE80211_STA_RX_BW_160:
621 if (elem->phy_cap_info[0] &
622 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
623@@ -748,7 +749,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
624 static void
625 mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta)
626 {
627- struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
628+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
629 struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
630 struct sta_rec_he_v2 *he;
631 struct tlv *tlv;
632@@ -759,7 +760,7 @@ mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta)
633 memcpy(he->he_phy_cap, elem->phy_cap_info, sizeof(he->he_phy_cap));
634 memcpy(he->he_mac_cap, elem->mac_cap_info, sizeof(he->he_mac_cap));
635
636- switch (sta->deflink.bandwidth) {
637+ switch (sta->bandwidth) {
638 case IEEE80211_STA_RX_BW_160:
639 if (elem->phy_cap_info[0] &
640 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
641@@ -775,7 +776,7 @@ mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta)
642 break;
643 }
644
645- he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
646+ he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
647 }
648
649 static u8
650@@ -788,9 +789,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
651 u8 mode = 0;
652
653 if (sta) {
654- ht_cap = &sta->deflink.ht_cap;
655- vht_cap = &sta->deflink.vht_cap;
656- he_cap = &sta->deflink.he_cap;
657+ ht_cap = &sta->ht_cap;
658+ vht_cap = &sta->vht_cap;
659+ he_cap = &sta->he_cap;
660 } else {
661 struct ieee80211_supported_band *sband;
662
663@@ -839,25 +840,25 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
664 u16 supp_rates;
665
666 /* starec ht */
667- if (sta->deflink.ht_cap.ht_supported) {
668+ if (sta->ht_cap.ht_supported) {
669 struct sta_rec_ht *ht;
670
671 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
672 ht = (struct sta_rec_ht *)tlv;
673- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
674+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
675 }
676
677 /* starec vht */
678- if (sta->deflink.vht_cap.vht_supported) {
679+ if (sta->vht_cap.vht_supported) {
680 struct sta_rec_vht *vht;
681 int len;
682
683 len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
684 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
685 vht = (struct sta_rec_vht *)tlv;
686- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
687- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
688- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
689+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
690+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
691+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
692 }
693
694 /* starec uapsd */
695@@ -866,11 +867,11 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
696 if (!is_mt7921(dev))
697 return;
698
699- if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)
700+ if (sta->ht_cap.ht_supported || sta->he_cap.has_he)
701 mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);
702
703 /* starec he */
704- if (sta->deflink.he_cap.has_he) {
705+ if (sta->he_cap.has_he) {
706 mt76_connac_mcu_sta_he_tlv(skb, sta);
707 mt76_connac_mcu_sta_he_tlv_v2(skb, sta);
708 if (band == NL80211_BAND_6GHZ &&
709@@ -880,7 +881,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
710 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G,
711 sizeof(*he_6g_capa));
712 he_6g_capa = (struct sta_rec_he_6g_capa *)tlv;
713- he_6g_capa->capa = sta->deflink.he_6ghz_capa.capa;
714+ he_6g_capa->capa = sta->he_6ghz_capa.capa;
715 }
716 }
717
718@@ -890,14 +891,14 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
719 phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
720 phy->rcpi = rcpi;
721 phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
722- sta->deflink.ht_cap.ampdu_factor) |
723+ sta->ht_cap.ampdu_factor) |
724 FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
725- sta->deflink.ht_cap.ampdu_density);
726+ sta->ht_cap.ampdu_density);
727
728 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
729 ra_info = (struct sta_rec_ra_info *)tlv;
730
731- supp_rates = sta->deflink.supp_rates[band];
732+ supp_rates = sta->supp_rates[band];
733 if (band == NL80211_BAND_2GHZ)
734 supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
735 FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
736@@ -906,18 +907,18 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
737
738 ra_info->legacy = cpu_to_le16(supp_rates);
739
740- if (sta->deflink.ht_cap.ht_supported)
741+ if (sta->ht_cap.ht_supported)
742 memcpy(ra_info->rx_mcs_bitmask,
743- sta->deflink.ht_cap.mcs.rx_mask,
744+ sta->ht_cap.mcs.rx_mask,
745 HT_MCS_MASK_NUM);
746
747 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
748 state = (struct sta_rec_state *)tlv;
749 state->state = sta_state;
750
751- if (sta->deflink.vht_cap.vht_supported) {
752- state->vht_opmode = sta->deflink.bandwidth;
753- state->vht_opmode |= (sta->deflink.rx_nss - 1) <<
754+ if (sta->vht_cap.vht_supported) {
755+ state->vht_opmode = sta->bandwidth;
756+ state->vht_opmode |= (sta->rx_nss - 1) <<
757 IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
758 }
759 }
760@@ -933,7 +934,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
761 tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
762 wtbl_tlv, sta_wtbl);
763 smps = (struct wtbl_smps *)tlv;
764- smps->smps = (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
765+ smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
766 }
767 EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
768
769@@ -945,27 +946,27 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
770 struct tlv *tlv;
771 u32 flags = 0;
772
773- if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_6ghz_capa.capa) {
774+ if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) {
775 tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
776 wtbl_tlv, sta_wtbl);
777 ht = (struct wtbl_ht *)tlv;
778 ht->ldpc = ht_ldpc &&
779- !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
780+ !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
781
782- if (sta->deflink.ht_cap.ht_supported) {
783- ht->af = sta->deflink.ht_cap.ampdu_factor;
784- ht->mm = sta->deflink.ht_cap.ampdu_density;
785+ if (sta->ht_cap.ht_supported) {
786+ ht->af = sta->ht_cap.ampdu_factor;
787+ ht->mm = sta->ht_cap.ampdu_density;
788 } else {
789- ht->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
790+ ht->af = le16_get_bits(sta->he_6ghz_capa.capa,
791 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
792- ht->mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
793+ ht->mm = le16_get_bits(sta->he_6ghz_capa.capa,
794 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
795 }
796
797 ht->ht = true;
798 }
799
800- if (sta->deflink.vht_cap.vht_supported || sta->deflink.he_6ghz_capa.capa) {
801+ if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) {
802 struct wtbl_vht *vht;
803 u8 af;
804
805@@ -974,18 +975,18 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
806 sta_wtbl);
807 vht = (struct wtbl_vht *)tlv;
808 vht->ldpc = vht_ldpc &&
809- !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
810+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
811 vht->vht = true;
812
813 af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
814- sta->deflink.vht_cap.cap);
815+ sta->vht_cap.cap);
816 if (ht)
817 ht->af = max(ht->af, af);
818 }
819
820 mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
821
822- if (is_connac_v1(dev) && sta->deflink.ht_cap.ht_supported) {
823+ if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
824 /* sgi */
825 u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
826 MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
827@@ -995,15 +996,15 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
828 sizeof(*raw), wtbl_tlv,
829 sta_wtbl);
830
831- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
832+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
833 flags |= MT_WTBL_W5_SHORT_GI_20;
834- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
835+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
836 flags |= MT_WTBL_W5_SHORT_GI_40;
837
838- if (sta->deflink.vht_cap.vht_supported) {
839- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
840+ if (sta->vht_cap.vht_supported) {
841+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
842 flags |= MT_WTBL_W5_SHORT_GI_80;
843- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
844+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
845 flags |= MT_WTBL_W5_SHORT_GI_160;
846 }
847 raw = (struct wtbl_raw *)tlv;
848@@ -1289,9 +1290,9 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
849 return 0x38;
850
851 if (sta) {
852- ht_cap = &sta->deflink.ht_cap;
853- vht_cap = &sta->deflink.vht_cap;
854- he_cap = &sta->deflink.he_cap;
855+ ht_cap = &sta->ht_cap;
856+ vht_cap = &sta->vht_cap;
857+ he_cap = &sta->he_cap;
858 } else {
859 struct ieee80211_supported_band *sband;
860
861@@ -1329,40 +1330,6 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
862 }
863 EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
864
865-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
866- enum nl80211_band band)
867-{
868- const struct ieee80211_sta_eht_cap *eht_cap;
869- struct ieee80211_supported_band *sband;
870- u8 mode = 0;
871-
872- if (band == NL80211_BAND_6GHZ)
873- mode |= PHY_MODE_AX_6G;
874-
875- sband = phy->hw->wiphy->bands[band];
876- eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
877-
878- if (!eht_cap || !eht_cap->has_eht)
879- return mode;
880-
881- switch (band) {
882- case NL80211_BAND_6GHZ:
883- mode |= PHY_MODE_BE_6G;
884- break;
885- case NL80211_BAND_5GHZ:
886- mode |= PHY_MODE_BE_5G;
887- break;
888- case NL80211_BAND_2GHZ:
889- mode |= PHY_MODE_BE_24G;
890- break;
891- default:
892- break;
893- }
894-
895- return mode;
896-}
897-EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
898-
899 const struct ieee80211_sta_he_cap *
900 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
901 {
902@@ -1375,18 +1342,6 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
903 }
904 EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
905
906-const struct ieee80211_sta_eht_cap *
907-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
908-{
909- enum nl80211_band band = phy->chandef.chan->band;
910- struct ieee80211_supported_band *sband;
911-
912- sband = phy->hw->wiphy->bands[band];
913-
914- return ieee80211_get_eht_iftype_cap(sband, vif->type);
915-}
916-EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
917-
918 #define DEFAULT_HE_PE_DURATION 4
919 #define DEFAULT_HE_DURATION_RTS_THRES 1023
920 static void
921@@ -1657,7 +1612,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
922 for (i = 0; i < sreq->n_ssids; i++) {
923 if (!sreq->ssids[i].ssid_len)
924 continue;
925-
926 req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
927 memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid,
928 sreq->ssids[i].ssid_len);
929@@ -1790,7 +1744,6 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
930 memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len);
931 req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len);
932 }
933-
934 req->match_num = sreq->n_match_sets;
935 for (i = 0; i < req->match_num; i++) {
936 match = &sreq->match_sets[i];
937@@ -2277,10 +2230,8 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
938 struct mt76_vif *vif,
939 struct ieee80211_bss_conf *info)
940 {
941- struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
942- bss_conf);
943 struct sk_buff *skb;
944- int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
945+ int i, len = min_t(int, info->arp_addr_cnt,
946 IEEE80211_BSS_ARP_ADDR_LIST_LEN);
947 struct {
948 struct {
949@@ -2308,7 +2259,7 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
950
951 skb_put_data(skb, &req_hdr, sizeof(req_hdr));
952 for (i = 0; i < len; i++)
953- skb_put_data(skb, &mvif->cfg.arp_addr_list[i], sizeof(__be32));
954+ skb_put_data(skb, &info->arp_addr_list[i], sizeof(__be32));
955
956 return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
957 }
958diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
959index 40a99e0c..d5fb7a62 100644
960--- a/mt76_connac_mcu.h
961+++ b/mt76_connac_mcu.h
962@@ -1871,12 +1871,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
963
964 const struct ieee80211_sta_he_cap *
965 mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
966-const struct ieee80211_sta_eht_cap *
967-mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
968 u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
969 enum nl80211_band band, struct ieee80211_sta *sta);
970-u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
971- enum nl80211_band band);
972
973 int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
974 struct mt76_connac_sta_key_conf *sta_key_conf,
975diff --git a/mt76x02_mac.c b/mt76x02_mac.c
976index d3f74473..87ea3db1 100644
977--- a/mt76x02_mac.c
978+++ b/mt76x02_mac.c
979@@ -404,7 +404,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
980 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
981 if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
982 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
983- if (nss > 1 && sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
984+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
985 txwi_flags |= MT_TXWI_FLAGS_MMPS;
986 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
987 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
988@@ -412,9 +412,9 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
989 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
990 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
991 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
992- u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
993+ u8 ampdu_density = sta->ht_cap.ampdu_density;
994
995- ba_size <<= sta->deflink.ht_cap.ampdu_factor;
996+ ba_size <<= sta->ht_cap.ampdu_factor;
997 ba_size = min_t(int, 63, ba_size - 1);
998 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
999 ba_size = 0;
1000diff --git a/mt7915/debugfs.c b/mt7915/debugfs.c
1001index 5a46813a..6cb7c16b 100644
1002--- a/mt7915/debugfs.c
1003+++ b/mt7915/debugfs.c
1004@@ -1364,8 +1364,8 @@ static ssize_t mt7915_sta_fixed_rate_set(struct file *file,
1005
1006 phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0);
1007 for (i = 0; i <= phy.bw; i++) {
1008- phy.sgi |= gi << (i << sta->deflink.he_cap.has_he);
1009- phy.he_ltf |= he_ltf << (i << sta->deflink.he_cap.has_he);
1010+ phy.sgi |= gi << (i << sta->he_cap.has_he);
1011+ phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he);
1012 }
1013 field = RATE_PARAM_FIXED;
1014
1015diff --git a/mt7915/dma.c b/mt7915/dma.c
1016index 43a5456d..d64f492a 100644
1017--- a/mt7915/dma.c
1018+++ b/mt7915/dma.c
1019@@ -556,8 +556,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
1020 if (ret < 0)
1021 return ret;
1022
1023- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
1024- mt7915_poll_tx);
1025+ netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
1026+ mt7915_poll_tx, NAPI_POLL_WEIGHT);
1027 napi_enable(&dev->mt76.tx_napi);
1028
1029 mt7915_dma_enable(dev);
1030diff --git a/mt7915/init.c b/mt7915/init.c
1031index b88c3827..1216416b 100644
1032--- a/mt7915/init.c
1033+++ b/mt7915/init.c
1034@@ -1107,8 +1107,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
1035 mt7915_gen_ppe_thresh(he_cap->ppe_thres, nss);
1036 } else {
1037 he_cap_elem->phy_cap_info[9] |=
1038- u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
1039- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
1040+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
1041 }
1042
1043 if (band == NL80211_BAND_6GHZ) {
1044diff --git a/mt7915/mac.c b/mt7915/mac.c
1045index 97ca55d2..c060e5ec 100644
1046--- a/mt7915/mac.c
1047+++ b/mt7915/mac.c
1048@@ -852,7 +852,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1049 u16 fc, tid;
1050 u32 val;
1051
1052- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
1053+ if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
1054 return;
1055
1056 tid = le32_get_bits(txwi[1], MT_TXD1_TID);
1057diff --git a/mt7915/main.c b/mt7915/main.c
1058index ea1d4e6a..c673b1bf 100644
1059--- a/mt7915/main.c
1060+++ b/mt7915/main.c
1061@@ -502,7 +502,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
1062
1063 static int
1064 mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1065- unsigned int link_id, u16 queue,
1066+ u16 queue,
1067 const struct ieee80211_tx_queue_params *params)
1068 {
1069 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1070@@ -597,7 +597,7 @@ mt7915_update_bss_color(struct ieee80211_hw *hw,
1071 static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
1072 struct ieee80211_vif *vif,
1073 struct ieee80211_bss_conf *info,
1074- u64 changed)
1075+ u32 changed)
1076 {
1077 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1078 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1079@@ -617,7 +617,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
1080 }
1081
1082 if (changed & BSS_CHANGED_ASSOC)
1083- mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc);
1084+ mt7915_mcu_add_bss_info(phy, vif, info->assoc);
1085
1086 if (changed & BSS_CHANGED_ERP_CTS_PROT)
1087 mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
1088@@ -1159,10 +1159,10 @@ static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw,
1089 {
1090 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1091 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1092- s16 txpower = sta->deflink.txpwr.power;
1093+ s16 txpower = sta->txpwr.power;
1094 int ret;
1095
1096- if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC)
1097+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC)
1098 txpower = 0;
1099
1100 mutex_lock(&dev->mt76.mutex);
1101@@ -1293,22 +1293,19 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
1102 struct ieee80211_vif *vif,
1103 u32 sset, u8 *data)
1104 {
1105- if (sset != ETH_SS_STATS)
1106- return;
1107-
1108- memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
1109- data += sizeof(mt7915_gstrings_stats);
1110- page_pool_ethtool_stats_get_strings(data);
1111+ if (sset == ETH_SS_STATS)
1112+ memcpy(data, *mt7915_gstrings_stats,
1113+ sizeof(mt7915_gstrings_stats));
1114 }
1115
1116 static
1117 int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
1118 struct ieee80211_vif *vif, int sset)
1119 {
1120- if (sset != ETH_SS_STATS)
1121- return 0;
1122+ if (sset == ETH_SS_STATS)
1123+ return MT7915_SSTATS_LEN;
1124
1125- return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
1126+ return 0;
1127 }
1128
1129 static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
1130@@ -1336,7 +1333,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
1131 };
1132 struct mib_stats *mib = &phy->mib;
1133 /* See mt7915_ampdu_stat_read_phy, etc */
1134- int i, ei = 0, stats_size;
1135+ int i, ei = 0;
1136
1137 mutex_lock(&dev->mt76.mutex);
1138
1139@@ -1417,12 +1414,9 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
1140 return;
1141
1142 ei += wi.worker_stat_count;
1143-
1144- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
1145-
1146- stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
1147- if (ei != stats_size)
1148- dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
1149+ if (ei != MT7915_SSTATS_LEN)
1150+ dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
1151+ ei, (int)MT7915_SSTATS_LEN);
1152 }
1153
1154 static void
1155diff --git a/mt7915/mcu.c b/mt7915/mcu.c
1156index 03ae3bc9..4b183a74 100644
1157--- a/mt7915/mcu.c
1158+++ b/mt7915/mcu.c
1159@@ -6,6 +6,7 @@
1160 #include "mcu.h"
1161 #include "mac.h"
1162 #include "eeprom.h"
1163+#include <linux/moduleparam.h>
1164
1165 #define fw_name(_dev, name, ...) ({ \
1166 char *_fw; \
1167@@ -59,7 +60,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
1168 struct mt7915_dev *dev = msta->vif->phy->dev;
1169 enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
1170 const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
1171- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
1172+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
1173
1174 for (nss = 0; nss < max_nss; nss++) {
1175 int mcs;
1176@@ -99,7 +100,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
1177
1178 /* only support 2ss on 160MHz for mt7915 */
1179 if (is_mt7915(&dev->mt76) && nss > 1 &&
1180- sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
1181+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
1182 break;
1183 }
1184
1185@@ -112,8 +113,8 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
1186 {
1187 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
1188 struct mt7915_dev *dev = msta->vif->phy->dev;
1189- u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
1190- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
1191+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
1192+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
1193 u16 mcs;
1194
1195 for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
1196@@ -135,7 +136,7 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
1197
1198 /* only support 2ss on 160MHz for mt7915 */
1199 if (is_mt7915(&dev->mt76) && nss > 1 &&
1200- sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
1201+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
1202 break;
1203 }
1204 }
1205@@ -144,10 +145,10 @@ static void
1206 mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
1207 const u8 *mask)
1208 {
1209- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
1210+ int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
1211
1212 for (nss = 0; nss < max_nss; nss++)
1213- ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
1214+ ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
1215 }
1216
1217 static int
1218@@ -220,7 +221,7 @@ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
1219 static void
1220 mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1221 {
1222- if (vif->bss_conf.csa_active)
1223+ if (vif->csa_active)
1224 ieee80211_csa_finish(vif);
1225 }
1226
1227@@ -321,7 +322,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
1228 static void
1229 mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1230 {
1231- if (!vif->bss_conf.color_change_active)
1232+ if (!vif->color_change_active)
1233 return;
1234
1235 ieee80211_color_change_finish(vif);
1236@@ -707,13 +708,13 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
1237 struct ieee80211_vif *vif)
1238 {
1239 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1240- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
1241+ struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
1242 struct ieee80211_he_mcs_nss_supp mcs_map;
1243 struct sta_rec_he *he;
1244 struct tlv *tlv;
1245 u32 cap = 0;
1246
1247- if (!sta->deflink.he_cap.has_he)
1248+ if (!sta->he_cap.has_he)
1249 return;
1250
1251 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
1252@@ -799,8 +800,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
1253
1254 he->he_cap = cpu_to_le32(cap);
1255
1256- mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
1257- switch (sta->deflink.bandwidth) {
1258+ mcs_map = sta->he_cap.he_mcs_nss_supp;
1259+ switch (sta->bandwidth) {
1260 case IEEE80211_STA_RX_BW_160:
1261 if (elem->phy_cap_info[0] &
1262 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
1263@@ -850,7 +851,7 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1264 struct ieee80211_sta *sta, struct ieee80211_vif *vif)
1265 {
1266 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1267- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
1268+ struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
1269 struct sta_rec_muru *muru;
1270 struct tlv *tlv;
1271
1272@@ -869,11 +870,11 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1273 muru->cfg.mimo_ul_en = true;
1274 muru->cfg.ofdma_dl_en = true;
1275
1276- if (sta->deflink.vht_cap.vht_supported)
1277+ if (sta->vht_cap.vht_supported)
1278 muru->mimo_dl.vht_mu_bfee =
1279- !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
1280+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
1281
1282- if (!sta->deflink.he_cap.has_he)
1283+ if (!sta->he_cap.has_he)
1284 return;
1285
1286 muru->mimo_dl.partial_bw_dl_mimo =
1287@@ -907,13 +908,13 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
1288 struct sta_rec_ht *ht;
1289 struct tlv *tlv;
1290
1291- if (!sta->deflink.ht_cap.ht_supported)
1292+ if (!sta->ht_cap.ht_supported)
1293 return;
1294
1295 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
1296
1297 ht = (struct sta_rec_ht *)tlv;
1298- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
1299+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
1300 }
1301
1302 static void
1303@@ -922,15 +923,15 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
1304 struct sta_rec_vht *vht;
1305 struct tlv *tlv;
1306
1307- if (!sta->deflink.vht_cap.vht_supported)
1308+ if (!sta->vht_cap.vht_supported)
1309 return;
1310
1311 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
1312
1313 vht = (struct sta_rec_vht *)tlv;
1314- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
1315- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
1316- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
1317+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
1318+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
1319+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
1320 }
1321
1322 static void
1323@@ -945,7 +946,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1324 vif->type != NL80211_IFTYPE_AP)
1325 return;
1326
1327- if (!sta->deflink.agg.max_amsdu_len)
1328+ if (!sta->max_amsdu_len)
1329 return;
1330
1331 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
1332@@ -954,7 +955,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1333 amsdu->amsdu_en = true;
1334 msta->wcid.amsdu = true;
1335
1336- switch (sta->deflink.agg.max_amsdu_len) {
1337+ switch (sta->max_amsdu_len) {
1338 case IEEE80211_MAX_MPDU_LEN_VHT_11454:
1339 if (!is_mt7915(&dev->mt76)) {
1340 amsdu->max_mpdu_size =
1341@@ -1017,8 +1018,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
1342 if (!bfee && tx_ant < 2)
1343 return false;
1344
1345- if (sta->deflink.he_cap.has_he) {
1346- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
1347+ if (sta->he_cap.has_he) {
1348+ struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
1349
1350 if (bfee)
1351 return mvif->cap.he_su_ebfee &&
1352@@ -1028,8 +1029,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
1353 HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
1354 }
1355
1356- if (sta->deflink.vht_cap.vht_supported) {
1357- u32 cap = sta->deflink.vht_cap.cap;
1358+ if (sta->vht_cap.vht_supported) {
1359+ u32 cap = sta->vht_cap.cap;
1360
1361 if (bfee)
1362 return mvif->cap.vht_su_ebfee &&
1363@@ -1055,7 +1056,7 @@ static void
1364 mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
1365 struct sta_rec_bf *bf)
1366 {
1367- struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
1368+ struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
1369 u8 n = 0;
1370
1371 bf->tx_mode = MT_PHY_TYPE_HT;
1372@@ -1080,7 +1081,7 @@ static void
1373 mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
1374 struct sta_rec_bf *bf, bool explicit)
1375 {
1376- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
1377+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
1378 struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
1379 u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
1380 u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
1381@@ -1101,14 +1102,14 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
1382 bf->ncol = min_t(u8, nss_mcs, bf->nrow);
1383 bf->ibf_ncol = bf->ncol;
1384
1385- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
1386+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
1387 bf->nrow = 1;
1388 } else {
1389 bf->nrow = tx_ant;
1390 bf->ncol = min_t(u8, nss_mcs, bf->nrow);
1391 bf->ibf_ncol = nss_mcs;
1392
1393- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
1394+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
1395 bf->ibf_nrow = 1;
1396 }
1397 }
1398@@ -1117,7 +1118,7 @@ static void
1399 mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
1400 struct mt7915_phy *phy, struct sta_rec_bf *bf)
1401 {
1402- struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
1403+ struct ieee80211_sta_he_cap *pc = &sta->he_cap;
1404 struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
1405 const struct ieee80211_sta_he_cap *vc =
1406 mt76_connac_get_he_phy_cap(phy->mt76, vif);
1407@@ -1142,7 +1143,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
1408 bf->ncol = min_t(u8, nss_mcs, bf->nrow);
1409 bf->ibf_ncol = bf->ncol;
1410
1411- if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
1412+ if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
1413 return;
1414
1415 /* go over for 160MHz and 80p80 */
1416@@ -1190,7 +1191,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1417 };
1418 bool ebf;
1419
1420- if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
1421+ if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he))
1422 return;
1423
1424 ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
1425@@ -1204,21 +1205,21 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1426 * vht: support eBF and iBF
1427 * ht: iBF only, since mac80211 lacks of eBF support
1428 */
1429- if (sta->deflink.he_cap.has_he && ebf)
1430+ if (sta->he_cap.has_he && ebf)
1431 mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
1432- else if (sta->deflink.vht_cap.vht_supported)
1433+ else if (sta->vht_cap.vht_supported)
1434 mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf);
1435- else if (sta->deflink.ht_cap.ht_supported)
1436+ else if (sta->ht_cap.ht_supported)
1437 mt7915_mcu_sta_bfer_ht(sta, phy, bf);
1438 else
1439 return;
1440
1441 bf->bf_cap = ebf ? ebf : dev->ibf << 1;
1442- bf->bw = sta->deflink.bandwidth;
1443- bf->ibf_dbw = sta->deflink.bandwidth;
1444+ bf->bw = sta->bandwidth;
1445+ bf->ibf_dbw = sta->bandwidth;
1446 bf->ibf_nrow = tx_ant;
1447
1448- if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
1449+ if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
1450 bf->ibf_timeout = 0x48;
1451 else
1452 bf->ibf_timeout = 0x18;
1453@@ -1228,7 +1229,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1454 else
1455 bf->mem_20m = matrix[bf->nrow][bf->ncol];
1456
1457- switch (sta->deflink.bandwidth) {
1458+ switch (sta->bandwidth) {
1459 case IEEE80211_STA_RX_BW_160:
1460 case IEEE80211_STA_RX_BW_80:
1461 bf->mem_total = bf->mem_20m * 2;
1462@@ -1253,7 +1254,7 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1463 struct tlv *tlv;
1464 u8 nrow = 0;
1465
1466- if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
1467+ if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he))
1468 return;
1469
1470 if (!mt7915_is_ebf_supported(phy, vif, sta, true))
1471@@ -1262,13 +1263,13 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
1472 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
1473 bfee = (struct sta_rec_bfee *)tlv;
1474
1475- if (sta->deflink.he_cap.has_he) {
1476- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
1477+ if (sta->he_cap.has_he) {
1478+ struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
1479
1480 nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
1481 pe->phy_cap_info[5]);
1482- } else if (sta->deflink.vht_cap.vht_supported) {
1483- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
1484+ } else if (sta->vht_cap.vht_supported) {
1485+ struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
1486
1487 nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
1488 pc->cap);
1489@@ -1324,7 +1325,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
1490 ra->phy = *phy;
1491 break;
1492 case RATE_PARAM_MMPS_UPDATE:
1493- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
1494+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
1495 break;
1496 case RATE_PARAM_SPE_UPDATE:
1497 ra->spe_idx = *(u8 *)data;
1498@@ -1399,7 +1400,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
1499 do { \
1500 u8 i, gi = mask->control[band]._gi; \
1501 gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
1502- for (i = 0; i <= sta->deflink.bandwidth; i++) { \
1503+ for (i = 0; i <= sta->bandwidth; i++) { \
1504 phy.sgi |= gi << (i << (_he)); \
1505 phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
1506 } \
1507@@ -1413,11 +1414,11 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
1508 } \
1509 } while (0)
1510
1511- if (sta->deflink.he_cap.has_he) {
1512+ if (sta->he_cap.has_he) {
1513 __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
1514- } else if (sta->deflink.vht_cap.vht_supported) {
1515+ } else if (sta->vht_cap.vht_supported) {
1516 __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
1517- } else if (sta->deflink.ht_cap.ht_supported) {
1518+ } else if (sta->ht_cap.ht_supported) {
1519 __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
1520 } else {
1521 nrates = hweight32(mask->control[band].legacy);
1522@@ -1451,7 +1452,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
1523 * actual txrate hardware sends out.
1524 */
1525 addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
1526- if (sta->deflink.he_cap.has_he)
1527+ if (sta->he_cap.has_he)
1528 mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
1529 else
1530 mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
1531@@ -1484,7 +1485,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
1532 enum nl80211_band band = chandef->chan->band;
1533 struct sta_rec_ra *ra;
1534 struct tlv *tlv;
1535- u32 supp_rate = sta->deflink.supp_rates[band];
1536+ u32 supp_rate = sta->supp_rates[band];
1537 u32 cap = sta->wme ? STA_CAP_WMM : 0;
1538
1539 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
1540@@ -1494,9 +1495,9 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
1541 ra->auto_rate = true;
1542 ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
1543 ra->channel = chandef->chan->hw_value;
1544- ra->bw = sta->deflink.bandwidth;
1545- ra->phy.bw = sta->deflink.bandwidth;
1546- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
1547+ ra->bw = sta->bandwidth;
1548+ ra->phy.bw = sta->bandwidth;
1549+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
1550
1551 if (supp_rate) {
1552 supp_rate &= mask->control[band].legacy;
1553@@ -1516,22 +1517,22 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
1554 }
1555 }
1556
1557- if (sta->deflink.ht_cap.ht_supported) {
1558+ if (sta->ht_cap.ht_supported) {
1559 ra->supp_mode |= MODE_HT;
1560- ra->af = sta->deflink.ht_cap.ampdu_factor;
1561- ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
1562+ ra->af = sta->ht_cap.ampdu_factor;
1563+ ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
1564
1565 cap |= STA_CAP_HT;
1566- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
1567+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
1568 cap |= STA_CAP_SGI_20;
1569- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1570+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
1571 cap |= STA_CAP_SGI_40;
1572- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
1573+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
1574 cap |= STA_CAP_TX_STBC;
1575- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
1576+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
1577 cap |= STA_CAP_RX_STBC;
1578 if (mvif->cap.ht_ldpc &&
1579- (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
1580+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
1581 cap |= STA_CAP_LDPC;
1582
1583 mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
1584@@ -1539,37 +1540,37 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
1585 ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
1586 }
1587
1588- if (sta->deflink.vht_cap.vht_supported) {
1589+ if (sta->vht_cap.vht_supported) {
1590 u8 af;
1591
1592 ra->supp_mode |= MODE_VHT;
1593 af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
1594- sta->deflink.vht_cap.cap);
1595+ sta->vht_cap.cap);
1596 ra->af = max_t(u8, ra->af, af);
1597
1598 cap |= STA_CAP_VHT;
1599- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
1600+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
1601 cap |= STA_CAP_VHT_SGI_80;
1602- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
1603+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
1604 cap |= STA_CAP_VHT_SGI_160;
1605- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
1606+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
1607 cap |= STA_CAP_VHT_TX_STBC;
1608- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
1609+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
1610 cap |= STA_CAP_VHT_RX_STBC;
1611 if (mvif->cap.vht_ldpc &&
1612- (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
1613+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
1614 cap |= STA_CAP_VHT_LDPC;
1615
1616 mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
1617 mask->control[band].vht_mcs);
1618 }
1619
1620- if (sta->deflink.he_cap.has_he) {
1621+ if (sta->he_cap.has_he) {
1622 ra->supp_mode |= MODE_HE;
1623 cap |= STA_CAP_HE;
1624
1625- if (sta->deflink.he_6ghz_capa.capa)
1626- ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
1627+ if (sta->he_6ghz_capa.capa)
1628+ ra->af = le16_get_bits(sta->he_6ghz_capa.capa,
1629 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
1630 }
1631
1632@@ -1778,7 +1779,7 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
1633 if (!offs->cntdwn_counter_offs[0])
1634 return;
1635
1636- sub_tag = vif->bss_conf.csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
1637+ sub_tag = vif->csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
1638 tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info),
1639 &bcn->sub_ntlv, &bcn->len);
1640 info = (struct bss_info_bcn_cntdwn *)tlv;
1641@@ -1863,9 +1864,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
1642 if (offs->cntdwn_counter_offs[0]) {
1643 u16 offset = offs->cntdwn_counter_offs[0];
1644
1645- if (vif->bss_conf.csa_active)
1646+ if (vif->csa_active)
1647 cont->csa_ofs = cpu_to_le16(offset - 4);
1648- if (vif->bss_conf.color_change_active)
1649+ if (vif->color_change_active)
1650 cont->bcc_ofs = cpu_to_le16(offset - 3);
1651 }
1652
1653@@ -2065,7 +2066,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1654 if (!en)
1655 goto out;
1656
1657- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
1658+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
1659 if (!skb)
1660 return -EINVAL;
1661
1662@@ -3247,17 +3248,17 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
1663 if (txpower) {
1664 u32 offs, len, i;
1665
1666- if (sta->deflink.ht_cap.ht_supported) {
1667+ if (sta->ht_cap.ht_supported) {
1668 const u8 *sku_len = mt7915_sku_group_len;
1669
1670 offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
1671 len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];
1672
1673- if (sta->deflink.vht_cap.vht_supported) {
1674+ if (sta->vht_cap.vht_supported) {
1675 offs += len;
1676 len = sku_len[SKU_VHT_BW20] * 4;
1677
1678- if (sta->deflink.he_cap.has_he) {
1679+ if (sta->he_cap.has_he) {
1680 offs += len + sku_len[SKU_HE_RU26] * 3;
1681 len = sku_len[SKU_HE_RU242] * 4;
1682 }
1683diff --git a/mt7915/mmio.c b/mt7915/mmio.c
1684index 6f0c0e2a..5ef43c44 100644
1685--- a/mt7915/mmio.c
1686+++ b/mt7915/mmio.c
1687@@ -596,9 +596,13 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
1688 static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
1689 {
1690 struct mt7915_dev *dev;
1691+ u32 length;
1692 int i;
1693
1694 dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
1695+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
1696+ sizeof(struct skb_shared_info));
1697+
1698 for (i = 0; i < dev->mt76.rx_token_size; i++) {
1699 struct mt76_txwi_cache *t;
1700
1701@@ -606,7 +610,9 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
1702 if (!t || !t->ptr)
1703 continue;
1704
1705- mt76_put_page_pool_buf(t->ptr, false);
1706+ dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
1707+ wed->wlan.rx_size, DMA_FROM_DEVICE);
1708+ __free_pages(virt_to_page(t->ptr), get_order(length));
1709 t->ptr = NULL;
1710
1711 mt76_put_rxwi(&dev->mt76, t);
1712@@ -618,38 +624,47 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
1713 static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
1714 {
1715 struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
1716- struct mt76_txwi_cache *t = NULL;
1717 struct mt7915_dev *dev;
1718- struct mt76_queue *q;
1719- int i, len;
1720+ u32 length;
1721+ int i;
1722
1723 dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
1724- q = &dev->mt76.q_rx[MT_RXQ_MAIN];
1725- len = SKB_WITH_OVERHEAD(q->buf_size);
1726+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
1727+ sizeof(struct skb_shared_info));
1728
1729 for (i = 0; i < size; i++) {
1730- enum dma_data_direction dir;
1731- dma_addr_t addr;
1732- u32 offset;
1733+ struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
1734+ dma_addr_t phy_addr;
1735+ struct page *page;
1736 int token;
1737- void *buf;
1738+ void *ptr;
1739
1740- t = mt76_get_rxwi(&dev->mt76);
1741 if (!t)
1742 goto unmap;
1743
1744- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
1745- if (!buf)
1746+ page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
1747+ if (!page) {
1748+ mt76_put_rxwi(&dev->mt76, t);
1749 goto unmap;
1750+ }
1751
1752- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
1753- dir = page_pool_get_dma_dir(q->page_pool);
1754- dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
1755+ ptr = page_address(page);
1756+ phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
1757+ wed->wlan.rx_size,
1758+ DMA_TO_DEVICE);
1759+ if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
1760+ __free_pages(page, get_order(length));
1761+ mt76_put_rxwi(&dev->mt76, t);
1762+ goto unmap;
1763+ }
1764
1765- desc->buf0 = cpu_to_le32(addr);
1766- token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
1767+ desc->buf0 = cpu_to_le32(phy_addr);
1768+ token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
1769 if (token < 0) {
1770- mt76_put_page_pool_buf(buf, false);
1771+ dma_unmap_single(dev->mt76.dma_dev, phy_addr,
1772+ wed->wlan.rx_size, DMA_TO_DEVICE);
1773+ __free_pages(page, get_order(length));
1774+ mt76_put_rxwi(&dev->mt76, t);
1775 goto unmap;
1776 }
1777
1778@@ -661,8 +676,6 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
1779 return 0;
1780
1781 unmap:
1782- if (t)
1783- mt76_put_rxwi(&dev->mt76, t);
1784 mt7915_mmio_wed_release_rx_buf(wed);
1785 return -ENOMEM;
1786 }
1787diff --git a/mt7921/main.c b/mt7921/main.c
1788index a72964e7..4c400223 100644
1789--- a/mt7921/main.c
1790+++ b/mt7921/main.c
1791@@ -1090,34 +1090,17 @@ static void
1792 mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1793 u32 sset, u8 *data)
1794 {
1795- struct mt7921_dev *dev = mt7921_hw_dev(hw);
1796-
1797 if (sset != ETH_SS_STATS)
1798 return;
1799
1800 memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
1801-
1802- if (mt76_is_sdio(&dev->mt76))
1803- return;
1804-
1805- data += sizeof(mt7921_gstrings_stats);
1806- page_pool_ethtool_stats_get_strings(data);
1807 }
1808
1809 static int
1810 mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1811 int sset)
1812 {
1813- struct mt7921_dev *dev = mt7921_hw_dev(hw);
1814-
1815- if (sset != ETH_SS_STATS)
1816- return 0;
1817-
1818- if (mt76_is_sdio(&dev->mt76))
1819- return ARRAY_SIZE(mt7921_gstrings_stats);
1820-
1821- return ARRAY_SIZE(mt7921_gstrings_stats) +
1822- page_pool_ethtool_stats_get_count();
1823+ return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
1824 }
1825
1826 static void
1827@@ -1137,7 +1120,6 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1828 struct ethtool_stats *stats, u64 *data)
1829 {
1830 struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
1831- int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
1832 struct mt7921_phy *phy = mt7921_hw_phy(hw);
1833 struct mt7921_dev *dev = phy->dev;
1834 struct mib_stats *mib = &phy->mib;
1835@@ -1193,14 +1175,9 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1836 return;
1837
1838 ei += wi.worker_stat_count;
1839-
1840- if (!mt76_is_sdio(&dev->mt76)) {
1841- mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
1842- stats_size += page_pool_ethtool_stats_get_count();
1843- }
1844-
1845- if (ei != stats_size)
1846- dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
1847+ if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
1848+ dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
1849+ ei, ARRAY_SIZE(mt7921_gstrings_stats));
1850 }
1851
1852 static u64
1853diff --git a/tx.c b/tx.c
1854index 1f309d05..6cda23fa 100644
1855--- a/tx.c
1856+++ b/tx.c
1857@@ -60,20 +60,15 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
1858 .skb = skb,
1859 .info = IEEE80211_SKB_CB(skb),
1860 };
1861- struct ieee80211_rate_status rs = {};
1862 struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
1863 struct mt76_wcid *wcid;
1864
1865 wcid = rcu_dereference(dev->wcid[cb->wcid]);
1866 if (wcid) {
1867 status.sta = wcid_to_sta(wcid);
1868- if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
1869- rs.rate_idx = wcid->rate;
1870- status.rates = &rs;
1871- status.n_rates = 1;
1872- } else {
1873- status.n_rates = 0;
1874- }
1875+
1876+ if (status.sta)
1877+ status.rate = &wcid->rate;
1878 }
1879
1880 hw = mt76_tx_status_get_hw(dev, skb);
1881diff --git a/usb.c b/usb.c
1882index 5e5c7bf5..3e281715 100644
1883--- a/usb.c
1884+++ b/usb.c
1885@@ -319,27 +319,29 @@ mt76u_set_endpoints(struct usb_interface *intf,
1886
1887 static int
1888 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
1889- int nsgs)
1890+ int nsgs, gfp_t gfp)
1891 {
1892 int i;
1893
1894 for (i = 0; i < nsgs; i++) {
1895+ struct page *page;
1896 void *data;
1897 int offset;
1898
1899- data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
1900+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
1901 if (!data)
1902 break;
1903
1904- sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
1905- offset);
1906+ page = virt_to_head_page(data);
1907+ offset = data - page_address(page);
1908+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
1909 }
1910
1911 if (i < nsgs) {
1912 int j;
1913
1914 for (j = nsgs; j < urb->num_sgs; j++)
1915- mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
1916+ skb_free_frag(sg_virt(&urb->sg[j]));
1917 urb->num_sgs = i;
1918 }
1919
1920@@ -352,16 +354,15 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
1921
1922 static int
1923 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
1924- struct urb *urb, int nsgs)
1925+ struct urb *urb, int nsgs, gfp_t gfp)
1926 {
1927 enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
1928- int offset;
1929
1930 if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
1931- return mt76u_fill_rx_sg(dev, q, urb, nsgs);
1932+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
1933
1934 urb->transfer_buffer_length = q->buf_size;
1935- urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
1936+ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
1937
1938 return urb->transfer_buffer ? 0 : -ENOMEM;
1939 }
1940@@ -399,7 +400,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
1941 if (err)
1942 return err;
1943
1944- return mt76u_refill_rx(dev, q, e->urb, sg_size);
1945+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
1946 }
1947
1948 static void mt76u_urb_free(struct urb *urb)
1949@@ -407,10 +408,10 @@ static void mt76u_urb_free(struct urb *urb)
1950 int i;
1951
1952 for (i = 0; i < urb->num_sgs; i++)
1953- mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
1954+ skb_free_frag(sg_virt(&urb->sg[i]));
1955
1956 if (urb->transfer_buffer)
1957- mt76_put_page_pool_buf(urb->transfer_buffer, false);
1958+ skb_free_frag(urb->transfer_buffer);
1959
1960 usb_free_urb(urb);
1961 }
1962@@ -546,8 +547,6 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
1963 len -= data_len;
1964 nsgs++;
1965 }
1966-
1967- skb_mark_for_recycle(skb);
1968 dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
1969
1970 return nsgs;
1971@@ -613,7 +612,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
1972
1973 count = mt76u_process_rx_entry(dev, urb, q->buf_size);
1974 if (count > 0) {
1975- err = mt76u_refill_rx(dev, q, urb, count);
1976+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
1977 if (err < 0)
1978 break;
1979 }
1980@@ -664,10 +663,6 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
1981 struct mt76_queue *q = &dev->q_rx[qid];
1982 int i, err;
1983
1984- err = mt76_create_page_pool(dev, q);
1985- if (err)
1986- return err;
1987-
1988 spin_lock_init(&q->lock);
1989 q->entry = devm_kcalloc(dev->dev,
1990 MT_NUM_RX_ENTRIES, sizeof(*q->entry),
1991@@ -696,6 +691,7 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
1992 static void
1993 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
1994 {
1995+ struct page *page;
1996 int i;
1997
1998 for (i = 0; i < q->ndesc; i++) {
1999@@ -705,8 +701,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
2000 mt76u_urb_free(q->entry[i].urb);
2001 q->entry[i].urb = NULL;
2002 }
2003- page_pool_destroy(q->page_pool);
2004- q->page_pool = NULL;
2005+
2006+ if (!q->rx_page.va)
2007+ return;
2008+
2009+ page = virt_to_page(q->rx_page.va);
2010+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
2011+ memset(&q->rx_page, 0, sizeof(q->rx_page));
2012 }
2013
2014 static void mt76u_free_rx(struct mt76_dev *dev)
2015--
20162.39.0
2017