From fd9a307422024d5c6e953634129cc2b61425e93f Mon Sep 17 00:00:00 2001
From: Ryder Lee <ryder.lee@mediatek.com>
Date: Mon, 14 Nov 2022 10:17:47 +0800
Subject: [PATCH] mt76: sync to master latest commit

wifi: mt76: mt7915: fix uninitialized irq_mask
wifi: mt76: mt7921: introduce remain_on_channel support
wifi: mt76: connac: rework macros for unified command
wifi: mt76: connac: update struct sta_rec_phy
wifi: mt76: connac: rework fields for larger bandwidth support in sta_rec_bf
wifi: mt76: connac: add more unified command IDs
wifi: mt76: connac: introduce unified event table
wifi: mt76: connac: add more bss info command tags
wifi: mt76: connac: add more starec command tags
wifi: mt76: connac: introduce helper for mt7996 chipset
wifi: mt76: mt7921: fix wrong power after multiple SAR set
wifi: mt76: mt7915: add missing MODULE_PARM_DESC
wifi: mt76: mt7915: add support to configure spatial reuse parameter set
wifi: mt76: introduce rxwi and rx token utility routines
wifi: mt76: add WED RX support to mt76_dma_{add,get}_buf
wifi: mt76: add WED RX support to mt76_dma_rx_fill
wifi: mt76: add WED RX support to dma queue alloc
wifi: mt76: add info parameter to rx_skb signature
wifi: mt76: connac: introduce mt76_connac_mcu_sta_wed_update utility routine
wifi: mt76: mt7915: enable WED RX support
wifi: mt76: mt7915: enable WED RX stats
wifi: mt76: mt7915: add basedband Txpower info into debugfs
wifi: mt76: mt7915: enable .sta_set_txpwr support
wifi: mt76: mt7915: fix band_idx usage
---
 dma.c | 244 +++++++++++++++++++++++++-------
 dma.h | 8 ++
 mac80211.c | 10 +-
 mt76.h | 26 +++-
 mt7603/dma.c | 2 +-
 mt7603/mt7603.h | 2 +-
 mt7615/mac.c | 2 +-
 mt7615/mt7615.h | 2 +-
 mt76_connac.h | 5 +
 mt76_connac_mcu.c | 25 +++-
 mt76_connac_mcu.h | 70 ++++++++-
 mt76x02.h | 2 +-
 mt76x02_txrx.c | 2 +-
 mt7915/coredump.c | 1 +
 mt7915/debugfs.c | 29 ++--
 mt7915/dma.c | 26 +++-
 mt7915/init.c | 3 +
 mt7915/mac.c | 60 ++++++--
 mt7915/main.c | 84 ++++++++---
 mt7915/mcu.c | 354 ++++++++++++++++++++++++++++++++++++++++------
 mt7915/mcu.h | 30 ++++
 mt7915/mmio.c | 320 +++++++++++++++++++++++++++++------------
 mt7915/mt7915.h | 13 +-
 mt7915/regs.h | 11 ++
 mt7915/testmode.c | 18 +--
 mt7921/init.c | 64 +++++++++
 mt7921/mac.c | 2 +-
 mt7921/main.c | 118 ++++++++++++++++
 mt7921/mcu.c | 24 ++++
 mt7921/mt7921.h | 52 ++++++-
 mt7921/pci.c | 33 ++++-
 mt7921/sdio.c | 23 ++-
 mt7921/usb.c | 12 +-
 sdio.c | 2 +-
 tx.c | 30 ++++
 usb.c | 2 +-
 36 files changed, 1438 insertions(+), 273 deletions(-)

69diff --git a/dma.c b/dma.c
70index 4b181305..ae22b959 100644
71--- a/dma.c
72+++ b/dma.c
73@@ -59,6 +59,19 @@ mt76_alloc_txwi(struct mt76_dev *dev)
74 return t;
75 }
76
77+static struct mt76_txwi_cache *
78+mt76_alloc_rxwi(struct mt76_dev *dev)
79+{
80+ struct mt76_txwi_cache *t;
81+
82+ t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
83+ if (!t)
84+ return NULL;
85+
86+ t->ptr = NULL;
87+ return t;
88+}
89+
90 static struct mt76_txwi_cache *
91 __mt76_get_txwi(struct mt76_dev *dev)
92 {
93@@ -75,6 +88,22 @@ __mt76_get_txwi(struct mt76_dev *dev)
94 return t;
95 }
96
97+static struct mt76_txwi_cache *
98+__mt76_get_rxwi(struct mt76_dev *dev)
99+{
100+ struct mt76_txwi_cache *t = NULL;
101+
102+ spin_lock(&dev->wed_lock);
103+ if (!list_empty(&dev->rxwi_cache)) {
104+ t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
105+ list);
106+ list_del(&t->list);
107+ }
108+ spin_unlock(&dev->wed_lock);
109+
110+ return t;
111+}
112+
113 static struct mt76_txwi_cache *
114 mt76_get_txwi(struct mt76_dev *dev)
115 {
116@@ -86,6 +115,18 @@ mt76_get_txwi(struct mt76_dev *dev)
117 return mt76_alloc_txwi(dev);
118 }
119
120+struct mt76_txwi_cache *
121+mt76_get_rxwi(struct mt76_dev *dev)
122+{
123+ struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
124+
125+ if (t)
126+ return t;
127+
128+ return mt76_alloc_rxwi(dev);
129+}
130+EXPORT_SYMBOL_GPL(mt76_get_rxwi);
131+
132 void
133 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
134 {
135@@ -98,6 +139,18 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
136 }
137 EXPORT_SYMBOL_GPL(mt76_put_txwi);
138
139+void
140+mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
141+{
142+ if (!t)
143+ return;
144+
145+ spin_lock(&dev->wed_lock);
146+ list_add(&t->list, &dev->rxwi_cache);
147+ spin_unlock(&dev->wed_lock);
148+}
149+EXPORT_SYMBOL_GPL(mt76_put_rxwi);
150+
151 static void
152 mt76_free_pending_txwi(struct mt76_dev *dev)
153 {
154@@ -112,6 +165,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
155 local_bh_enable();
156 }
157
158+static void
159+mt76_free_pending_rxwi(struct mt76_dev *dev)
160+{
161+ struct mt76_txwi_cache *t;
162+
163+ local_bh_disable();
164+ while ((t = __mt76_get_rxwi(dev)) != NULL) {
165+ if (t->ptr)
166+ skb_free_frag(t->ptr);
167+ kfree(t);
168+ }
169+ local_bh_enable();
170+}
171+
172 static void
173 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
174 {
175@@ -148,11 +215,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
176 u32 ctrl;
177 int i, idx = -1;
178
179- if (txwi) {
180- q->entry[q->head].txwi = DMA_DUMMY_DATA;
181- q->entry[q->head].skip_buf0 = true;
182- }
183-
184 for (i = 0; i < nbufs; i += 2, buf += 2) {
185 u32 buf0 = buf[0].addr, buf1 = 0;
186
187@@ -162,28 +224,48 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
188 desc = &q->desc[idx];
189 entry = &q->entry[idx];
190
191- if (buf[0].skip_unmap)
192- entry->skip_buf0 = true;
193- entry->skip_buf1 = i == nbufs - 1;
194-
195- entry->dma_addr[0] = buf[0].addr;
196- entry->dma_len[0] = buf[0].len;
197-
198- ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
199- if (i < nbufs - 1) {
200- entry->dma_addr[1] = buf[1].addr;
201- entry->dma_len[1] = buf[1].len;
202- buf1 = buf[1].addr;
203- ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
204- if (buf[1].skip_unmap)
205- entry->skip_buf1 = true;
206+ if ((q->flags & MT_QFLAG_WED) &&
207+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
208+ struct mt76_txwi_cache *t = txwi;
209+ int rx_token;
210+
211+ if (!t)
212+ return -ENOMEM;
213+
214+ rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
215+ buf[0].addr);
216+ buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
217+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
218+ MT_DMA_CTL_TO_HOST;
219+ } else {
220+ if (txwi) {
221+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
222+ q->entry[q->head].skip_buf0 = true;
223+ }
224+
225+ if (buf[0].skip_unmap)
226+ entry->skip_buf0 = true;
227+ entry->skip_buf1 = i == nbufs - 1;
228+
229+ entry->dma_addr[0] = buf[0].addr;
230+ entry->dma_len[0] = buf[0].len;
231+
232+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
233+ if (i < nbufs - 1) {
234+ entry->dma_addr[1] = buf[1].addr;
235+ entry->dma_len[1] = buf[1].len;
236+ buf1 = buf[1].addr;
237+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
238+ if (buf[1].skip_unmap)
239+ entry->skip_buf1 = true;
240+ }
241+
242+ if (i == nbufs - 1)
243+ ctrl |= MT_DMA_CTL_LAST_SEC0;
244+ else if (i == nbufs - 2)
245+ ctrl |= MT_DMA_CTL_LAST_SEC1;
246 }
247
248- if (i == nbufs - 1)
249- ctrl |= MT_DMA_CTL_LAST_SEC0;
250- else if (i == nbufs - 2)
251- ctrl |= MT_DMA_CTL_LAST_SEC1;
252-
253 WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
254 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
255 WRITE_ONCE(desc->info, cpu_to_le32(info));
256@@ -272,33 +354,60 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
257
258 static void *
259 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
260- int *len, u32 *info, bool *more)
261+ int *len, u32 *info, bool *more, bool *drop)
262 {
263 struct mt76_queue_entry *e = &q->entry[idx];
264 struct mt76_desc *desc = &q->desc[idx];
265- dma_addr_t buf_addr;
266- void *buf = e->buf;
267- int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
268+ void *buf;
269
270- buf_addr = e->dma_addr[0];
271 if (len) {
272- u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
273- *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
274- *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
275+ u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
276+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
277+ *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
278 }
279
280 if (info)
281 *info = le32_to_cpu(desc->info);
282
283- dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
284- e->buf = NULL;
285+ if ((q->flags & MT_QFLAG_WED) &&
286+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
287+ u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
288+ le32_to_cpu(desc->buf1));
289+ struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
290+
291+ if (!t)
292+ return NULL;
293+
294+ dma_unmap_single(dev->dma_dev, t->dma_addr,
295+ SKB_WITH_OVERHEAD(q->buf_size),
296+ DMA_FROM_DEVICE);
297+
298+ buf = t->ptr;
299+ t->dma_addr = 0;
300+ t->ptr = NULL;
301+
302+ mt76_put_rxwi(dev, t);
303+
304+ if (drop) {
305+ u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
306+
307+ *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
308+ MT_DMA_CTL_DROP));
309+ }
310+ } else {
311+ buf = e->buf;
312+ e->buf = NULL;
313+ dma_unmap_single(dev->dma_dev, e->dma_addr[0],
314+ SKB_WITH_OVERHEAD(q->buf_size),
315+ DMA_FROM_DEVICE);
316+ }
317
318 return buf;
319 }
320
321 static void *
322 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
323- int *len, u32 *info, bool *more)
324+ int *len, u32 *info, bool *more, bool *drop)
325 {
326 int idx = q->tail;
327
328@@ -314,7 +423,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
329 q->tail = (q->tail + 1) % q->ndesc;
330 q->queued--;
331
332- return mt76_dma_get_buf(dev, q, idx, len, info, more);
333+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
334 }
335
336 static int
337@@ -441,14 +550,26 @@ free_skb:
338 return ret;
339 }
340
341+static struct page_frag_cache *
342+mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
343+{
344+ struct page_frag_cache *rx_page = &q->rx_page;
345+
346+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
347+ if ((q->flags & MT_QFLAG_WED) &&
348+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
349+ rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
350+#endif
351+ return rx_page;
352+}
353+
354 static int
355 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
356 {
357- dma_addr_t addr;
358- void *buf;
359- int frames = 0;
360+ struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
361 int len = SKB_WITH_OVERHEAD(q->buf_size);
362- int offset = q->buf_offset;
363+ int frames = 0, offset = q->buf_offset;
364+ dma_addr_t addr;
365
366 if (!q->ndesc)
367 return 0;
368@@ -456,9 +577,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
369 spin_lock_bh(&q->lock);
370
371 while (q->queued < q->ndesc - 1) {
372+ struct mt76_txwi_cache *t = NULL;
373 struct mt76_queue_buf qbuf;
374+ void *buf = NULL;
375
376- buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
377+ if ((q->flags & MT_QFLAG_WED) &&
378+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
379+ t = mt76_get_rxwi(dev);
380+ if (!t)
381+ break;
382+ }
383+
384+ buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
385 if (!buf)
386 break;
387
388@@ -471,7 +601,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
389 qbuf.addr = addr + offset;
390 qbuf.len = len - offset;
391 qbuf.skip_unmap = false;
392- mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
393+ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
394 frames++;
395 }
396
397@@ -517,6 +647,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
398 if (!ret)
399 q->wed_regs = wed->txfree_ring.reg_base;
400 break;
401+ case MT76_WED_Q_RX:
402+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
403+ if (!ret)
404+ q->wed_regs = wed->rx_ring[ring].reg_base;
405+ break;
406 default:
407 ret = -EINVAL;
408 }
409@@ -574,7 +709,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
410
411 spin_lock_bh(&q->lock);
412 do {
413- buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
414+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
415 if (!buf)
416 break;
417
418@@ -615,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
419
420 static void
421 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
422- int len, bool more)
423+ int len, bool more, u32 info)
424 {
425 struct sk_buff *skb = q->rx_head;
426 struct skb_shared_info *shinfo = skb_shinfo(skb);
427@@ -635,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
428
429 q->rx_head = NULL;
430 if (nr_frags < ARRAY_SIZE(shinfo->frags))
431- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
432+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
433 else
434 dev_kfree_skb(skb);
435 }
436@@ -656,6 +791,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
437 }
438
439 while (done < budget) {
440+ bool drop = false;
441 u32 info;
442
443 if (check_ddone) {
444@@ -666,10 +802,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
445 break;
446 }
447
448- data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
449+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
450+ &drop);
451 if (!data)
452 break;
453
454+ if (drop)
455+ goto free_frag;
456+
457 if (q->rx_head)
458 data_len = q->buf_size;
459 else
460@@ -682,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
461 }
462
463 if (q->rx_head) {
464- mt76_add_fragment(dev, q, data, len, more);
465+ mt76_add_fragment(dev, q, data, len, more, info);
466 continue;
467 }
468
469@@ -706,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
470 continue;
471 }
472
473- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
474+ dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
475 continue;
476
477 free_frag:
478@@ -803,11 +943,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
479 mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
480
481 mt76_for_each_q_rx(dev, i) {
482+ struct mt76_queue *q = &dev->q_rx[i];
483+
484 netif_napi_del(&dev->napi[i]);
485- mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
486+ if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
487+ mt76_dma_rx_cleanup(dev, q);
488 }
489
490 mt76_free_pending_txwi(dev);
491+ mt76_free_pending_rxwi(dev);
492
493 if (mtk_wed_device_active(&dev->mmio.wed))
494 mtk_wed_device_detach(&dev->mmio.wed);
495diff --git a/dma.h b/dma.h
496index fdf786f9..53c6ce25 100644
497--- a/dma.h
498+++ b/dma.h
499@@ -15,6 +15,14 @@
500 #define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
501 #define MT_DMA_CTL_LAST_SEC0 BIT(30)
502 #define MT_DMA_CTL_DMA_DONE BIT(31)
503+#define MT_DMA_CTL_TO_HOST BIT(8)
504+#define MT_DMA_CTL_TO_HOST_A BIT(12)
505+#define MT_DMA_CTL_DROP BIT(14)
506+#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
507+
508+#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
509+#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
510+#define MT_DMA_INFO_PPE_VLD BIT(31)
511
512 #define MT_DMA_HDR_LEN 4
513 #define MT_RX_INFO_LEN 4
514diff --git a/mac80211.c b/mac80211.c
515index 30c1bc56..acac04ef 100644
516--- a/mac80211.c
517+++ b/mac80211.c
518@@ -572,6 +572,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
519 spin_lock_init(&dev->lock);
520 spin_lock_init(&dev->cc_lock);
521 spin_lock_init(&dev->status_lock);
522+ spin_lock_init(&dev->wed_lock);
523 mutex_init(&dev->mutex);
524 init_waitqueue_head(&dev->tx_wait);
525
526@@ -594,9 +595,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
527 spin_lock_init(&dev->token_lock);
528 idr_init(&dev->token);
529
530+ spin_lock_init(&dev->rx_token_lock);
531+ idr_init(&dev->rx_token);
532+
533 INIT_LIST_HEAD(&dev->wcid_list);
534
535 INIT_LIST_HEAD(&dev->txwi_cache);
536+ INIT_LIST_HEAD(&dev->rxwi_cache);
537 dev->token_size = dev->drv->token_size;
538
539 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
540@@ -1292,7 +1297,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
541
542 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
543 mt76_check_sta(dev, skb);
544- mt76_rx_aggr_reorder(skb, &frames);
545+ if (mtk_wed_device_active(&dev->mmio.wed))
546+ __skb_queue_tail(&frames, skb);
547+ else
548+ mt76_rx_aggr_reorder(skb, &frames);
549 }
550
551 mt76_rx_complete(dev, &frames, napi);
552diff --git a/mt76.h b/mt76.h
553index a2bccf6b..33f87e51 100644
554--- a/mt76.h
555+++ b/mt76.h
556@@ -35,6 +35,7 @@
557 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
558 FIELD_PREP(MT_QFLAG_WED_RING, _n))
559 #define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
560+#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
561 #define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
562
563 struct mt76_dev;
564@@ -56,6 +57,7 @@ enum mt76_bus_type {
565 enum mt76_wed_type {
566 MT76_WED_Q_TX,
567 MT76_WED_Q_TXFREE,
568+ MT76_WED_Q_RX,
569 };
570
571 struct mt76_bus_ops {
572@@ -271,9 +273,15 @@ struct mt76_sta_stats {
573 u64 tx_nss[4]; /* 1, 2, 3, 4 */
574 u64 tx_mcs[16]; /* mcs idx */
575 u64 tx_bytes;
576+ /* WED TX */
577 u32 tx_packets;
578 u32 tx_retries;
579 u32 tx_failed;
580+ /* WED RX */
581+ u64 rx_bytes;
582+ u32 rx_packets;
583+ u32 rx_errors;
584+ u32 rx_drops;
585 };
586
587 enum mt76_wcid_flags {
588@@ -339,7 +347,10 @@ struct mt76_txwi_cache {
589 struct list_head list;
590 dma_addr_t dma_addr;
591
592- struct sk_buff *skb;
593+ union {
594+ struct sk_buff *skb;
595+ void *ptr;
596+ };
597 };
598
599 struct mt76_rx_tid {
600@@ -439,7 +450,7 @@ struct mt76_driver_ops {
601 bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
602
603 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
604- struct sk_buff *skb);
605+ struct sk_buff *skb, u32 *info);
606
607 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
608
609@@ -728,6 +739,7 @@ struct mt76_dev {
610
611 struct ieee80211_hw *hw;
612
613+ spinlock_t wed_lock;
614 spinlock_t lock;
615 spinlock_t cc_lock;
616
617@@ -754,6 +766,7 @@ struct mt76_dev {
618 struct sk_buff_head rx_skb[__MT_RXQ_MAX];
619
620 struct list_head txwi_cache;
621+ struct list_head rxwi_cache;
622 struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
623 struct mt76_queue q_rx[__MT_RXQ_MAX];
624 const struct mt76_queue_ops *queue_ops;
625@@ -768,6 +781,10 @@ struct mt76_dev {
626 u16 token_count;
627 u16 token_size;
628
629+ spinlock_t rx_token_lock;
630+ struct idr rx_token;
631+ u16 rx_token_size;
632+
633 wait_queue_head_t tx_wait;
634 /* spinclock used to protect wcid pktid linked list */
635 spinlock_t status_lock;
636@@ -1247,6 +1264,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
637 }
638
639 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
640+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
641+struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
642 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
643 struct napi_struct *napi);
644 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
645@@ -1391,6 +1410,9 @@ struct mt76_txwi_cache *
646 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
647 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
648 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
649+struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
650+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
651+ struct mt76_txwi_cache *r, dma_addr_t phys);
652
653 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
654 {
655diff --git a/mt7603/dma.c b/mt7603/dma.c
656index 590cff9d..06a9e6ec 100644
657--- a/mt7603/dma.c
658+++ b/mt7603/dma.c
659@@ -69,7 +69,7 @@ free:
660 }
661
662 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
663- struct sk_buff *skb)
664+ struct sk_buff *skb, u32 *info)
665 {
666 struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
667 __le32 *rxd = (__le32 *)skb->data;
668diff --git a/mt7603/mt7603.h b/mt7603/mt7603.h
669index 0fd46d90..7c3be596 100644
670--- a/mt7603/mt7603.h
671+++ b/mt7603/mt7603.h
672@@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
673 void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
674
675 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
676- struct sk_buff *skb);
677+ struct sk_buff *skb, u32 *info);
678 void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
679 void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
680 int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
681diff --git a/mt7615/mac.c b/mt7615/mac.c
682index 305bf182..a9560247 100644
683--- a/mt7615/mac.c
684+++ b/mt7615/mac.c
685@@ -1666,7 +1666,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
686 EXPORT_SYMBOL_GPL(mt7615_rx_check);
687
688 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
689- struct sk_buff *skb)
690+ struct sk_buff *skb, u32 *info)
691 {
692 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
693 __le32 *rxd = (__le32 *)skb->data;
694diff --git a/mt7615/mt7615.h b/mt7615/mt7615.h
695index 1080d202..43739ecf 100644
696--- a/mt7615/mt7615.h
697+++ b/mt7615/mt7615.h
698@@ -514,7 +514,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
699 void mt7615_tx_token_put(struct mt7615_dev *dev);
700 bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
701 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
702- struct sk_buff *skb);
703+ struct sk_buff *skb, u32 *info);
704 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
705 int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
706 struct ieee80211_sta *sta);
707diff --git a/mt76_connac.h b/mt76_connac.h
708index 0915eb57..8ba883b0 100644
709--- a/mt76_connac.h
710+++ b/mt76_connac.h
711@@ -187,6 +187,11 @@ static inline bool is_mt7986(struct mt76_dev *dev)
712 return mt76_chip(dev) == 0x7986;
713 }
714
715+static inline bool is_mt7996(struct mt76_dev *dev)
716+{
717+ return mt76_chip(dev) == 0x7990;
718+}
719+
720 static inline bool is_mt7622(struct mt76_dev *dev)
721 {
722 if (!IS_ENABLED(CONFIG_MT7622_WMAC))
723diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
724index dfec416e..c65267b4 100644
725--- a/mt76_connac_mcu.c
726+++ b/mt76_connac_mcu.c
727@@ -65,7 +65,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
728 int cmd;
729
730 if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
731- (is_mt7921(dev) && addr == 0x900000))
732+ (is_mt7921(dev) && addr == 0x900000) ||
733+ (is_mt7996(dev) && addr == 0x900000))
734 cmd = MCU_CMD(PATCH_START_REQ);
735 else
736 cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
737@@ -1183,6 +1184,16 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
738 }
739 EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
740
741+int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
742+{
743+ if (!mtk_wed_device_active(&dev->mmio.wed))
744+ return 0;
745+
746+ return mtk_wed_device_update_msg(&dev->mmio.wed, WED_WO_STA_REC,
747+ skb->data, skb->len);
748+}
749+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_wed_update);
750+
751 int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
752 struct ieee80211_ampdu_params *params,
753 int cmd, bool enable, bool tx)
754@@ -1208,6 +1219,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
755 mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
756 wtbl_hdr);
757
758+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
759+ if (ret)
760+ return ret;
761+
762 ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
763 if (ret)
764 return ret;
765@@ -1218,6 +1233,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
766
767 mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
768
769+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
770+ if (ret)
771+ return ret;
772+
773 return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
774 }
775 EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
776@@ -2658,6 +2677,10 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
777 if (ret)
778 return ret;
779
780+ ret = mt76_connac_mcu_sta_wed_update(dev, skb);
781+ if (ret)
782+ return ret;
783+
784 return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
785 }
786 EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
787diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
788index 87c65d25..72d235a1 100644
789--- a/mt76_connac_mcu.h
790+++ b/mt76_connac_mcu.h
791@@ -63,7 +63,7 @@ struct mt76_connac2_mcu_txd {
792 } __packed __aligned(4);
793
794 /**
795- * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for firmware v3
796+ * struct mt76_connac2_mcu_uni_txd - mcu command descriptor for connac2 and connac3
797 * @txd: hardware descriptor
798 * @len: total length not including txd
799 * @cid: command identifier
800@@ -393,7 +393,8 @@ struct sta_rec_phy {
801 u8 ampdu;
802 u8 rts_policy;
803 u8 rcpi;
804- u8 rsv[2];
805+ u8 max_ampdu_len; /* connac3 */
806+ u8 rsv[1];
807 } __packed;
808
809 struct sta_rec_he_6g_capa {
810@@ -454,8 +455,8 @@ struct sta_rec_bf {
811 u8 ibf_dbw;
812 u8 ibf_ncol;
813 u8 ibf_nrow;
814- u8 nrow_bw160;
815- u8 ncol_bw160;
816+ u8 nrow_gt_bw80;
817+ u8 ncol_gt_bw80;
818 u8 ru_start_idx;
819 u8 ru_end_idx;
820
821@@ -781,6 +782,8 @@ enum {
822 STA_REC_BFEE,
823 STA_REC_PHY = 0x15,
824 STA_REC_HE_6G = 0x17,
825+ STA_REC_HDRT = 0x28,
826+ STA_REC_HDR_TRANS = 0x2B,
827 STA_REC_MAX_NUM
828 };
829
830@@ -986,6 +989,17 @@ enum {
831 MCU_EXT_EVENT_MURU_CTRL = 0x9f,
832 };
833
834+/* unified event table */
835+enum {
836+ MCU_UNI_EVENT_RESULT = 0x01,
837+ MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
838+ MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
839+ MCU_UNI_EVENT_RDD_REPORT = 0x11,
840+};
841+
842+#define MCU_UNI_CMD_EVENT BIT(1)
843+#define MCU_UNI_CMD_UNSOLICITED_EVENT BIT(2)
844+
845 enum {
846 MCU_Q_QUERY,
847 MCU_Q_SET,
848@@ -1068,10 +1082,11 @@ enum {
849
850 #define MCU_CMD_ACK BIT(0)
851 #define MCU_CMD_UNI BIT(1)
852-#define MCU_CMD_QUERY BIT(2)
853+#define MCU_CMD_SET BIT(2)
854
855 #define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \
856- MCU_CMD_QUERY)
857+ MCU_CMD_SET)
858+#define MCU_CMD_UNI_QUERY_ACK (MCU_CMD_ACK | MCU_CMD_UNI)
859
860 #define __MCU_CMD_FIELD_ID GENMASK(7, 0)
861 #define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
862@@ -1079,6 +1094,7 @@ enum {
863 #define __MCU_CMD_FIELD_UNI BIT(17)
864 #define __MCU_CMD_FIELD_CE BIT(18)
865 #define __MCU_CMD_FIELD_WA BIT(19)
866+#define __MCU_CMD_FIELD_WM BIT(20)
867
868 #define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \
869 MCU_CMD_##_t)
870@@ -1100,6 +1116,16 @@ enum {
871 FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \
872 MCU_WA_PARAM_CMD_##_t))
873
874+#define MCU_WM_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
875+ __MCU_CMD_FIELD_WM)
876+#define MCU_WM_UNI_CMD_QUERY(_t) (MCU_UNI_CMD(_t) | \
877+ __MCU_CMD_FIELD_QUERY | \
878+ __MCU_CMD_FIELD_WM)
879+#define MCU_WA_UNI_CMD(_t) (MCU_UNI_CMD(_t) | \
880+ __MCU_CMD_FIELD_WA)
881+#define MCU_WMWA_UNI_CMD(_t) (MCU_WM_UNI_CMD(_t) | \
882+ __MCU_CMD_FIELD_WA)
883+
884 enum {
885 MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
886 MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
887@@ -1153,11 +1179,33 @@ enum {
888 MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01,
889 MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02,
890 MCU_UNI_CMD_STA_REC_UPDATE = 0x03,
891+ MCU_UNI_CMD_EDCA_UPDATE = 0x04,
892 MCU_UNI_CMD_SUSPEND = 0x05,
893 MCU_UNI_CMD_OFFLOAD = 0x06,
894 MCU_UNI_CMD_HIF_CTRL = 0x07,
895+ MCU_UNI_CMD_BAND_CONFIG = 0x08,
896+ MCU_UNI_CMD_REPT_MUAR = 0x09,
897+ MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
898+ MCU_UNI_CMD_REG_ACCESS = 0x0d,
899+ MCU_UNI_CMD_POWER_CREL = 0x0f,
900+ MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
901+ MCU_UNI_CMD_SER = 0x13,
902+ MCU_UNI_CMD_TWT = 0x14,
903+ MCU_UNI_CMD_RDD_CTRL = 0x19,
904+ MCU_UNI_CMD_GET_MIB_INFO = 0x22,
905 MCU_UNI_CMD_SNIFFER = 0x24,
906+ MCU_UNI_CMD_SR = 0x25,
907 MCU_UNI_CMD_ROC = 0x27,
908+ MCU_UNI_CMD_TXPOWER = 0x2b,
909+ MCU_UNI_CMD_EFUSE_CTRL = 0x2d,
910+ MCU_UNI_CMD_RA = 0x2f,
911+ MCU_UNI_CMD_MURU = 0x31,
912+ MCU_UNI_CMD_BF = 0x33,
913+ MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
914+ MCU_UNI_CMD_THERMAL = 0x35,
915+ MCU_UNI_CMD_VOW = 0x37,
916+ MCU_UNI_CMD_RRO = 0x57,
917+ MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
918 };
919
920 enum {
921@@ -1207,14 +1255,23 @@ enum {
922
923 enum {
924 UNI_BSS_INFO_BASIC = 0,
925+ UNI_BSS_INFO_RA = 1,
926 UNI_BSS_INFO_RLM = 2,
927 UNI_BSS_INFO_BSS_COLOR = 4,
928 UNI_BSS_INFO_HE_BASIC = 5,
929 UNI_BSS_INFO_BCN_CONTENT = 7,
930+ UNI_BSS_INFO_BCN_CSA = 8,
931+ UNI_BSS_INFO_BCN_BCC = 9,
932+ UNI_BSS_INFO_BCN_MBSSID = 10,
933+ UNI_BSS_INFO_RATE = 11,
934 UNI_BSS_INFO_QBSS = 15,
935+ UNI_BSS_INFO_SEC = 16,
936+ UNI_BSS_INFO_TXCMD = 18,
937 UNI_BSS_INFO_UAPSD = 19,
938 UNI_BSS_INFO_PS = 21,
939 UNI_BSS_INFO_BCNFT = 22,
940+ UNI_BSS_INFO_OFFLOAD = 25,
941+ UNI_BSS_INFO_MLD = 26,
942 };
943
944 enum {
945@@ -1823,6 +1880,7 @@ int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
946 int mt76_connac_mcu_restart(struct mt76_dev *dev);
947 int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
948 u8 rx_sel, u8 val);
949+int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
950 int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
951 const char *fw_wa);
952 int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name);
953diff --git a/mt76x02.h b/mt76x02.h
954index 849c2644..3f2a9b7f 100644
955--- a/mt76x02.h
956+++ b/mt76x02.h
957@@ -187,7 +187,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
958 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
959 bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
960 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
961- struct sk_buff *skb);
962+ struct sk_buff *skb, u32 *info);
963 void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
964 irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
965 void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
966diff --git a/mt76x02_txrx.c b/mt76x02_txrx.c
967index 3a313075..d8bc4ae1 100644
968--- a/mt76x02_txrx.c
969+++ b/mt76x02_txrx.c
970@@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
971 EXPORT_SYMBOL_GPL(mt76x02_tx);
972
973 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
974- struct sk_buff *skb)
975+ struct sk_buff *skb, u32 *info)
976 {
977 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
978 void *rxwi = skb->data;
979diff --git a/mt7915/coredump.c b/mt7915/coredump.c
980index bb4b7040..d097a56d 100644
981--- a/mt7915/coredump.c
982+++ b/mt7915/coredump.c
983@@ -9,6 +9,7 @@
984
985 static bool coredump_memdump;
986 module_param(coredump_memdump, bool, 0644);
987+MODULE_PARM_DESC(coredump_memdump, "Optional ability to dump firmware memory");
988
989 static const struct mt7915_mem_region mt7915_mem_regions[] = {
990 {
991diff --git a/mt7915/debugfs.c b/mt7915/debugfs.c
992index 766e6208..30f8f18b 100644
993--- a/mt7915/debugfs.c
994+++ b/mt7915/debugfs.c
995@@ -51,7 +51,7 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
996 {
997 struct mt7915_phy *phy = file->private_data;
998 struct mt7915_dev *dev = phy->dev;
999- bool ext_phy = phy != &dev->phy;
1000+ bool band = phy->band_idx;
1001 char buf[16];
1002 int ret = 0;
1003 u16 val;
1004@@ -83,7 +83,7 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
1005 * 8: trigger firmware crash.
1006 */
1007 case SER_QUERY:
1008- ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
1009+ ret = mt7915_mcu_set_ser(dev, 0, 0, band);
1010 break;
1011 case SER_SET_RECOVER_L1:
1012 case SER_SET_RECOVER_L2:
1013@@ -91,17 +91,17 @@ mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
1014 case SER_SET_RECOVER_L3_TX_ABORT:
1015 case SER_SET_RECOVER_L3_TX_DISABLE:
1016 case SER_SET_RECOVER_L3_BF:
1017- ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
1018+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), band);
1019 if (ret)
1020 return ret;
1021
1022- ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
1023+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, band);
1024 break;
1025
1026 /* enable full chip reset */
1027 case SER_SET_RECOVER_FULL:
1028 mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
1029- ret = mt7915_mcu_set_ser(dev, 1, 3, ext_phy);
1030+ ret = mt7915_mcu_set_ser(dev, 1, 3, band);
1031 if (ret)
1032 return ret;
1033
1034@@ -967,11 +967,18 @@ mt7915_rate_txpower_show(struct seq_file *file, void *data)
1035 "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
1036 };
1037 struct mt7915_phy *phy = file->private;
1038+ struct mt7915_dev *dev = phy->dev;
1039 s8 txpower[MT7915_SKU_RATE_NUM], *buf;
1040- int i;
1041+ u32 reg;
1042+ int i, ret;
1043+
1044+ ret = mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
1045+ if (ret)
1046+ return ret;
1047+
1048+ /* Txpower propagation path: TMAC -> TXV -> BBP */
1049+ seq_printf(file, "\nPhy %d\n", phy != &dev->phy);
1050
1051- seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy);
1052- mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
1053 for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
1054 u8 mcs_num = mt7915_sku_group_len[i];
1055
1056@@ -982,6 +989,12 @@ mt7915_rate_txpower_show(struct seq_file *file, void *data)
1057 buf += mt7915_sku_group_len[i];
1058 }
1059
1060+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_TPC_CTRL_STAT(phy->band_idx) :
1061+ MT_WF_PHY_TPC_CTRL_STAT_MT7916(phy->band_idx);
1062+
1063+ seq_printf(file, "\nBaseband transmit power %ld\n",
1064+ mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
1065+
1066 return 0;
1067 }
1068
1069diff --git a/mt7915/dma.c b/mt7915/dma.c
1070index 9a57ad8f..27b67800 100644
1071--- a/mt7915/dma.c
1072+++ b/mt7915/dma.c
1073@@ -361,11 +361,18 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
1074
1075 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
1076 u32 wed_irq_mask = irq_mask;
1077+ int ret;
1078
1079 wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
1080 if (!is_mt7986(&dev->mt76))
1081 mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
1082- mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
1083+ else
1084+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
1085+
1086+ ret = mt7915_mcu_wed_enable_rx_stats(dev);
1087+ if (ret)
1088+ return ret;
1089+
1090 mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
1091 }
1092
1093@@ -401,6 +408,9 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
1094 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
1095 FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
1096 wed_control_rx1));
1097+ if (is_mt7915(mdev))
1098+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
1099+ MT_WFDMA0_EXT0_RXWB_KEEP);
1100 }
1101 } else {
1102 mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
1103@@ -473,6 +483,13 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
1104
1105 /* rx data queue for band0 */
1106 if (!dev->phy.band_idx) {
1107+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
1108+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
1109+ dev->mt76.q_rx[MT_RXQ_MAIN].flags =
1110+ MT_WED_Q_RX(MT7915_RXQ_BAND0);
1111+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
1112+ }
1113+
1114 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
1115 MT_RXQ_ID(MT_RXQ_MAIN),
1116 MT7915_RX_RING_SIZE,
1117@@ -503,6 +520,13 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
1118 }
1119
1120 if (dev->dbdc_support || dev->phy.band_idx) {
1121+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
1122+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
1123+ dev->mt76.q_rx[MT_RXQ_BAND1].flags =
1124+ MT_WED_Q_RX(MT7915_RXQ_BAND1);
1125+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
1126+ }
1127+
1128 /* rx data queue for band1 */
1129 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
1130 MT_RXQ_ID(MT_RXQ_BAND1),
1131diff --git a/mt7915/init.c b/mt7915/init.c
1132index 0a5f7d85..9e69ab82 100644
1133--- a/mt7915/init.c
1134+++ b/mt7915/init.c
1135@@ -355,6 +355,9 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
1136 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
1137 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
1138
1139+ if (!is_mt7915(&dev->mt76))
1140+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
1141+
1142 if (!mdev->dev->of_node ||
1143 !of_property_read_bool(mdev->dev->of_node,
1144 "mediatek,disable-radar-background"))
1145diff --git a/mt7915/mac.c b/mt7915/mac.c
1146index 99123e77..97a19bdb 100644
1147--- a/mt7915/mac.c
1148+++ b/mt7915/mac.c
1149@@ -165,9 +165,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
1150 sta = container_of((void *)msta, struct ieee80211_sta,
1151 drv_priv);
1152 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1153- u8 q = mt76_connac_lmac_mapping(i);
1154- u32 tx_cur = tx_time[q];
1155- u32 rx_cur = rx_time[q];
1156+ u8 queue = mt76_connac_lmac_mapping(i);
1157+ u32 tx_cur = tx_time[queue];
1158+ u32 rx_cur = rx_time[queue];
1159 u8 tid = ac_to_tid[i];
1160
1161 if (!tx_cur && !rx_cur)
1162@@ -245,8 +245,38 @@ void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
1163 mt76_clear(dev, addr, BIT(5));
1164 }
1165
1166+static void
1167+mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
1168+ struct mt7915_sta *msta, struct sk_buff *skb,
1169+ u32 info)
1170+{
1171+ struct ieee80211_vif *vif;
1172+ struct wireless_dev *wdev;
1173+ u32 hash, reason;
1174+
1175+ if (!msta || !msta->vif)
1176+ return;
1177+
1178+ if (!(q->flags & MT_QFLAG_WED) ||
1179+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX)
1180+ return;
1181+
1182+ if (!(info & MT_DMA_INFO_PPE_VLD))
1183+ return;
1184+
1185+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
1186+ drv_priv);
1187+ wdev = ieee80211_vif_to_wdev(vif);
1188+ skb->dev = wdev->netdev;
1189+
1190+ reason = FIELD_GET(MT_DMA_PPE_CPU_REASON, info);
1191+ hash = FIELD_GET(MT_DMA_PPE_ENTRY, info);
1192+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, reason, hash);
1193+}
1194+
1195 static int
1196-mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
1197+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
1198+ enum mt76_rxq_id q, u32 *info)
1199 {
1200 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1201 struct mt76_phy *mphy = &dev->mt76.phy;
1202@@ -513,6 +543,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
1203 }
1204 } else {
1205 status->flag |= RX_FLAG_8023;
1206+ mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
1207+ *info);
1208 }
1209
1210 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
1211@@ -1096,7 +1128,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
1212 }
1213
1214 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1215- struct sk_buff *skb)
1216+ struct sk_buff *skb, u32 *info)
1217 {
1218 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1219 __le32 *rxd = (__le32 *)skb->data;
1220@@ -1130,7 +1162,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1221 dev_kfree_skb(skb);
1222 break;
1223 case PKT_TYPE_NORMAL:
1224- if (!mt7915_mac_fill_rx(dev, skb)) {
1225+ if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
1226 mt76_rx(&dev->mt76, q, skb);
1227 return;
1228 }
1229@@ -1228,18 +1260,18 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
1230 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1231 }
1232
1233-void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
1234+void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
1235 {
1236 u32 reg;
1237
1238- reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
1239- MT_WF_PHY_RXTD12_MT7916(ext_phy);
1240+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
1241+ MT_WF_PHY_RXTD12_MT7916(band);
1242 mt76_set(dev, reg,
1243 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
1244 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
1245
1246- reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
1247- MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
1248+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
1249+ MT_WF_PHY_RX_CTRL1_MT7916(band);
1250 mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
1251 }
1252
1253@@ -1354,7 +1386,6 @@ mt7915_mac_restart(struct mt7915_dev *dev)
1254 struct mt76_phy *ext_phy;
1255 struct mt76_dev *mdev = &dev->mt76;
1256 int i, ret;
1257- u32 irq_mask;
1258
1259 ext_phy = dev->mt76.phys[MT_BAND1];
1260 phy2 = ext_phy ? ext_phy->priv : NULL;
1261@@ -1412,7 +1443,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
1262 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
1263
1264 if (dev->hif2) {
1265- mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
1266+ mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
1267 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
1268 }
1269 if (dev_is_pci(mdev->dev)) {
1270@@ -1949,7 +1980,6 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
1271 static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1272 {
1273 struct mt7915_dev *dev = phy->dev;
1274- bool ext_phy = phy != &dev->phy;
1275 u32 trb;
1276
1277 if (!phy->omac_mask)
1278@@ -1967,7 +1997,7 @@ static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1279 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
1280 trb == phy->trb_ts)
1281 mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
1282- ext_phy);
1283+ phy->band_idx);
1284
1285 phy->trb_ts = trb;
1286 }
1287diff --git a/mt7915/main.c b/mt7915/main.c
1288index fe5ec166..2505fa7e 100644
1289--- a/mt7915/main.c
1290+++ b/mt7915/main.c
1291@@ -30,31 +30,31 @@ int mt7915_run(struct ieee80211_hw *hw)
1292 running = mt7915_dev_running(dev);
1293
1294 if (!running) {
1295- ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
1296+ ret = mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.band_idx, 0);
1297 if (ret)
1298 goto out;
1299
1300- ret = mt7915_mcu_set_mac(dev, 0, true, true);
1301+ ret = mt7915_mcu_set_mac(dev, dev->phy.band_idx, true, true);
1302 if (ret)
1303 goto out;
1304
1305- mt7915_mac_enable_nf(dev, 0);
1306+ mt7915_mac_enable_nf(dev, dev->phy.band_idx);
1307 }
1308
1309- if (phy != &dev->phy || phy->band_idx) {
1310- ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
1311+ if (phy != &dev->phy) {
1312+ ret = mt76_connac_mcu_set_pm(&dev->mt76, phy->band_idx, 0);
1313 if (ret)
1314 goto out;
1315
1316- ret = mt7915_mcu_set_mac(dev, 1, true, true);
1317+ ret = mt7915_mcu_set_mac(dev, phy->band_idx, true, true);
1318 if (ret)
1319 goto out;
1320
1321- mt7915_mac_enable_nf(dev, 1);
1322+ mt7915_mac_enable_nf(dev, phy->band_idx);
1323 }
1324
1325 ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
1326- phy != &dev->phy);
1327+ phy->band_idx);
1328 if (ret)
1329 goto out;
1330
1331@@ -107,13 +107,13 @@ static void mt7915_stop(struct ieee80211_hw *hw)
1332 clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
1333
1334 if (phy != &dev->phy) {
1335- mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
1336- mt7915_mcu_set_mac(dev, 1, false, false);
1337+ mt76_connac_mcu_set_pm(&dev->mt76, phy->band_idx, 1);
1338+ mt7915_mcu_set_mac(dev, phy->band_idx, false, false);
1339 }
1340
1341 if (!mt7915_dev_running(dev)) {
1342- mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
1343- mt7915_mcu_set_mac(dev, 0, false, false);
1344+ mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.band_idx, 1);
1345+ mt7915_mcu_set_mac(dev, dev->phy.band_idx, false, false);
1346 }
1347
1348 mutex_unlock(&dev->mt76.mutex);
1349@@ -440,7 +440,6 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
1350 {
1351 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1352 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1353- bool band = phy != &dev->phy;
1354 int ret;
1355
1356 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1357@@ -468,6 +467,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
1358
1359 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1360 bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
1361+ bool band = phy->band_idx;
1362
1363 if (!enabled)
1364 phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
1365@@ -505,7 +505,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
1366 {
1367 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1368 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1369- bool band = phy != &dev->phy;
1370+ bool band = phy->band_idx;
1371 u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
1372 MT_WF_RFCR1_DROP_BF_POLL |
1373 MT_WF_RFCR1_DROP_BA |
1374@@ -600,10 +600,8 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
1375 mt7915_mcu_add_sta(dev, vif, NULL, join);
1376 }
1377
1378- if (changed & BSS_CHANGED_ASSOC) {
1379+ if (changed & BSS_CHANGED_ASSOC)
1380 mt7915_mcu_add_bss_info(phy, vif, info->assoc);
1381- mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
1382- }
1383
1384 if (changed & BSS_CHANGED_ERP_CTS_PROT)
1385 mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
1386@@ -627,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
1387 mt7915_mcu_set_tx(dev, vif);
1388
1389 if (changed & BSS_CHANGED_HE_OBSS_PD)
1390- mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
1391+ mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
1392
1393 if (changed & BSS_CHANGED_HE_BSS_COLOR)
1394 mt7915_update_bss_color(hw, vif, &info->he_bss_color);
1395@@ -744,7 +742,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
1396 int ret;
1397
1398 mutex_lock(&dev->mt76.mutex);
1399- ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
1400+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy->band_idx);
1401 mutex_unlock(&dev->mt76.mutex);
1402
1403 return ret;
1404@@ -847,7 +845,7 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
1405 {
1406 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1407 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1408- bool band = phy != &dev->phy;
1409+ bool band = phy->band_idx;
1410 union {
1411 u64 t64;
1412 u32 t32[2];
1413@@ -892,7 +890,7 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1414 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1415 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1416 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1417- bool band = phy != &dev->phy;
1418+ bool band = phy->band_idx;
1419 union {
1420 u64 t64;
1421 u32 t32[2];
1422@@ -923,7 +921,7 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1423 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1424 struct mt7915_dev *dev = mt7915_hw_dev(hw);
1425 struct mt7915_phy *phy = mt7915_hw_phy(hw);
1426- bool band = phy != &dev->phy;
1427+ bool band = phy->band_idx;
1428 union {
1429 u64 t64;
1430 u32 t32[2];
1431@@ -1036,6 +1034,14 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
1432
1433 sinfo->tx_retries = msta->wcid.stats.tx_retries;
1434 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
1435+
1436+ if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) {
1437+ sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
1438+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
1439+
1440+ sinfo->rx_packets = msta->wcid.stats.rx_packets;
1441+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
1442+ }
1443 }
1444
1445 sinfo->ack_signal = (s8)msta->ack_signal;
1446@@ -1127,6 +1133,39 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
1447 mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
1448 }
1449
1450+static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw,
1451+ struct ieee80211_vif *vif,
1452+ struct ieee80211_sta *sta)
1453+{
1454+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
1455+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
1456+ s16 txpower = sta->txpwr.power;
1457+ int ret;
1458+
1459+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC)
1460+ txpower = 0;
1461+
1462+ mutex_lock(&dev->mt76.mutex);
1463+
1464+ /* NOTE: temporarily use 0 as minimum limit, which is a
1465+ * global setting and will be applied to all stations.
1466+ */
1467+ ret = mt7915_mcu_set_txpower_frame_min(phy, 0);
1468+ if (ret)
1469+ goto out;
1470+
1471+ /* This only applies to data frames while pushing traffic,
1472+ * whereas the management frames or other packets that are
1473+ * using fixed rate can be configured via TxD.
1474+ */
1475+ ret = mt7915_mcu_set_txpower_frame(phy, vif, sta, txpower);
1476+
1477+out:
1478+ mutex_unlock(&dev->mt76.mutex);
1479+
1480+ return ret;
1481+}
1482+
1483 static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
1484 "tx_ampdu_cnt",
1485 "tx_stop_q_empty_cnt",
1486@@ -1492,6 +1531,7 @@ const struct ieee80211_ops mt7915_ops = {
1487 .set_bitrate_mask = mt7915_set_bitrate_mask,
1488 .set_coverage_class = mt7915_set_coverage_class,
1489 .sta_statistics = mt7915_sta_statistics,
1490+ .sta_set_txpwr = mt7915_sta_set_txpwr,
1491 .sta_set_4addr = mt7915_sta_set_4addr,
1492 .sta_set_decap_offload = mt7915_sta_set_decap_offload,
1493 .add_twt_setup = mt7915_mac_add_twt_setup,
1494diff --git a/mt7915/mcu.c b/mt7915/mcu.c
1495index 09e3dd8e..36c21596 100644
1496--- a/mt7915/mcu.c
1497+++ b/mt7915/mcu.c
1498@@ -32,6 +32,10 @@
1499 #define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
1500 #define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
1501
1502+static bool sr_scene_detect = true;
1503+module_param(sr_scene_detect, bool, 0644);
1504+MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");
1505+
1506 static u8
1507 mt7915_mcu_get_sta_nss(u16 mcs_map)
1508 {
1509@@ -595,7 +599,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
1510 .mode = !!mask || enable,
1511 .entry_count = 1,
1512 .write = 1,
1513- .band = phy != &dev->phy,
1514+ .band = phy->band_idx,
1515 .index = idx * 2 + bssid,
1516 };
1517
1518@@ -1131,7 +1135,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
1519 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
1520 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
1521
1522- bf->ncol_bw160 = nss_mcs;
1523+ bf->ncol_gt_bw80 = nss_mcs;
1524 }
1525
1526 if (pe->phy_cap_info[0] &
1527@@ -1139,10 +1143,10 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
1528 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
1529 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
1530
1531- if (bf->ncol_bw160)
1532- bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs);
1533+ if (bf->ncol_gt_bw80)
1534+ bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
1535 else
1536- bf->ncol_bw160 = nss_mcs;
1537+ bf->ncol_gt_bw80 = nss_mcs;
1538 }
1539
1540 snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
1541@@ -1150,7 +1154,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
1542 sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
1543 pe->phy_cap_info[4]);
1544
1545- bf->nrow_bw160 = min_t(int, snd_dim, sts);
1546+ bf->nrow_gt_bw80 = min_t(int, snd_dim, sts);
1547 }
1548
1549 static void
1550@@ -1677,10 +1681,32 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
1551 return ret;
1552 }
1553 out:
1554+ ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb);
1555+ if (ret)
1556+ return ret;
1557+
1558 return mt76_mcu_skb_send_msg(&dev->mt76, skb,
1559 MCU_EXT_CMD(STA_REC_UPDATE), true);
1560 }
1561
1562+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev)
1563+{
1564+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
1565+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
1566+ struct {
1567+ __le32 args[2];
1568+ } req = {
1569+ .args[0] = cpu_to_le32(1),
1570+ .args[1] = cpu_to_le32(6),
1571+ };
1572+
1573+ return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL,
1574+ &req, sizeof(req));
1575+#else
1576+ return 0;
1577+#endif
1578+}
1579+
1580 int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
1581 struct ieee80211_vif *vif, bool enable)
1582 {
1583@@ -1689,7 +1715,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
1584 struct {
1585 struct req_hdr {
1586 u8 omac_idx;
1587- u8 dbdc_idx;
1588+ u8 band_idx;
1589 __le16 tlv_num;
1590 u8 is_tlv_append;
1591 u8 rsv[3];
1592@@ -1698,13 +1724,13 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
1593 __le16 tag;
1594 __le16 len;
1595 u8 active;
1596- u8 dbdc_idx;
1597+ u8 band_idx;
1598 u8 omac_addr[ETH_ALEN];
1599 } __packed tlv;
1600 } data = {
1601 .hdr = {
1602 .omac_idx = mvif->mt76.omac_idx,
1603- .dbdc_idx = mvif->mt76.band_idx,
1604+ .band_idx = mvif->mt76.band_idx,
1605 .tlv_num = cpu_to_le16(1),
1606 .is_tlv_append = 1,
1607 },
1608@@ -1712,7 +1738,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
1609 .tag = cpu_to_le16(DEV_INFO_ACTIVE),
1610 .len = cpu_to_le16(sizeof(struct req_tlv)),
1611 .active = enable,
1612- .dbdc_idx = mvif->mt76.band_idx,
1613+ .band_idx = mvif->mt76.band_idx,
1614 },
1615 };
1616
1617@@ -2559,7 +2585,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
1618 req.monitor_central_chan =
1619 ieee80211_frequency_to_channel(chandef->center_freq1);
1620 req.monitor_bw = mt76_connac_chan_bw(chandef);
1621- req.band_idx = phy != &dev->phy;
1622+ req.band_idx = phy->band_idx;
1623 req.scan_mode = 1;
1624 break;
1625 }
1626@@ -2567,7 +2593,7 @@ mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
1627 req.monitor_chan = chandef->chan->hw_value;
1628 req.monitor_central_chan =
1629 ieee80211_frequency_to_channel(chandef->center_freq1);
1630- req.band_idx = phy != &dev->phy;
1631+ req.band_idx = phy->band_idx;
1632 req.scan_mode = 2;
1633 break;
1634 case CH_SWITCH_BACKGROUND_SCAN_STOP:
1635@@ -2971,7 +2997,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
1636 }
1637
1638 for (i = 0; i < 5; i++) {
1639- req[i].band = cpu_to_le32(phy != &dev->phy);
1640+ req[i].band = cpu_to_le32(phy->band_idx);
1641 req[i].offs = cpu_to_le32(offs[i + start]);
1642
1643 if (!is_mt7915(&dev->mt76) && i == 3)
1644@@ -3016,11 +3042,11 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
1645 struct {
1646 u8 ctrl_id;
1647 u8 action;
1648- u8 dbdc_idx;
1649+ u8 band_idx;
1650 u8 rsv[5];
1651 } req = {
1652 .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
1653- .dbdc_idx = phy != &dev->phy,
1654+ .band_idx = phy->band_idx,
1655 };
1656
1657 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
1658@@ -3079,6 +3105,88 @@ out:
1659 &req, sizeof(req), false);
1660 }
1661
1662+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower)
1663+{
1664+ struct mt7915_dev *dev = phy->dev;
1665+ struct {
1666+ u8 format_id;
1667+ u8 rsv;
1668+ u8 band_idx;
1669+ s8 txpower_min;
1670+ } __packed req = {
1671+ .format_id = TX_POWER_LIMIT_FRAME_MIN,
1672+ .band_idx = phy->band_idx,
1673+ .txpower_min = txpower * 2, /* 0.5db */
1674+ };
1675+
1676+ return mt76_mcu_send_msg(&dev->mt76,
1677+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
1678+ sizeof(req), true);
1679+}
1680+
1681+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
1682+ struct ieee80211_vif *vif,
1683+ struct ieee80211_sta *sta, s8 txpower)
1684+{
1685+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
1686+ struct mt7915_dev *dev = phy->dev;
1687+ struct mt76_phy *mphy = phy->mt76;
1688+ struct {
1689+ u8 format_id;
1690+ u8 rsv[3];
1691+ u8 band_idx;
1692+ s8 txpower_max;
1693+ __le16 wcid;
1694+ s8 txpower_offs[48];
1695+ } __packed req = {
1696+ .format_id = TX_POWER_LIMIT_FRAME,
1697+ .band_idx = phy->band_idx,
1698+ .txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2),
1699+ .wcid = cpu_to_le16(msta->wcid.idx),
1700+ };
1701+ int ret, n_chains = hweight8(mphy->antenna_mask);
1702+ s8 txpower_sku[MT7915_SKU_RATE_NUM];
1703+
1704+ ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku));
1705+ if (ret)
1706+ return ret;
1707+
1708+ txpower = txpower * 2 - mt76_tx_power_nss_delta(n_chains);
1709+ if (txpower > mphy->txpower_cur || txpower < 0)
1710+ return -EINVAL;
1711+
1712+ if (txpower) {
1713+ u32 offs, len, i;
1714+
1715+ if (sta->ht_cap.ht_supported) {
1716+ const u8 *sku_len = mt7915_sku_group_len;
1717+
1718+ offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
1719+ len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];
1720+
1721+ if (sta->vht_cap.vht_supported) {
1722+ offs += len;
1723+ len = sku_len[SKU_VHT_BW20] * 4;
1724+
1725+ if (sta->he_cap.has_he) {
1726+ offs += len + sku_len[SKU_HE_RU26] * 3;
1727+ len = sku_len[SKU_HE_RU242] * 4;
1728+ }
1729+ }
1730+ } else {
1731+ return -EINVAL;
1732+ }
1733+
1734+ for (i = 0; i < len; i++, offs++)
1735+ req.txpower_offs[i] =
1736+ DIV_ROUND_UP(txpower - txpower_sku[offs], 2);
1737+ }
1738+
1739+ return mt76_mcu_send_msg(&dev->mt76,
1740+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
1741+ sizeof(req), true);
1742+}
1743+
1744 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
1745 {
1746 struct mt7915_dev *dev = phy->dev;
1747@@ -3087,11 +3195,11 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
1748 struct mt7915_sku_val {
1749 u8 format_id;
1750 u8 limit_type;
1751- u8 dbdc_idx;
1752+ u8 band_idx;
1753 s8 val[MT7915_SKU_RATE_NUM];
1754 } __packed req = {
1755- .format_id = 4,
1756- .dbdc_idx = phy != &dev->phy,
1757+ .format_id = TX_POWER_LIMIT_TABLE,
1758+ .band_idx = phy->band_idx,
1759 };
1760 struct mt76_power_limits limits_array;
1761 s8 *la = (s8 *)&limits_array;
1762@@ -3137,14 +3245,14 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
1763 struct {
1764 u8 format_id;
1765 u8 category;
1766- u8 band;
1767+ u8 band_idx;
1768 u8 _rsv;
1769 } __packed req = {
1770- .format_id = 7,
1771+ .format_id = TX_POWER_LIMIT_INFO,
1772 .category = RATE_POWER_INFO,
1773- .band = phy != &dev->phy,
1774+ .band_idx = phy->band_idx,
1775 };
1776- s8 res[MT7915_SKU_RATE_NUM][2];
1777+ s8 txpower_sku[MT7915_SKU_RATE_NUM][2];
1778 struct sk_buff *skb;
1779 int ret, i;
1780
1781@@ -3154,9 +3262,9 @@ int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
1782 if (ret)
1783 return ret;
1784
1785- memcpy(res, skb->data + 4, sizeof(res));
1786+ memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku));
1787 for (i = 0; i < len; i++)
1788- txpower[i] = res[i][req.band];
1789+ txpower[i] = txpower_sku[i][req.band_idx];
1790
1791 dev_kfree_skb(skb);
1792
1793@@ -3191,11 +3299,11 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
1794 struct mt7915_sku {
1795 u8 format_id;
1796 u8 sku_enable;
1797- u8 dbdc_idx;
1798+ u8 band_idx;
1799 u8 rsv;
1800 } __packed req = {
1801- .format_id = 0,
1802- .dbdc_idx = phy != &dev->phy,
1803+ .format_id = TX_POWER_LIMIT_ENABLE,
1804+ .band_idx = phy->band_idx,
1805 .sku_enable = enable,
1806 };
1807
1808@@ -3270,31 +3378,193 @@ int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
1809 sizeof(req), true);
1810 }
1811
1812-int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
1813- bool enable)
1814+static int
1815+mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val)
1816+{
1817+ struct mt7915_dev *dev = phy->dev;
1818+ struct mt7915_mcu_sr_ctrl req = {
1819+ .action = action,
1820+ .argnum = 1,
1821+ .band_idx = phy->band_idx,
1822+ .val = cpu_to_le32(val),
1823+ };
1824+
1825+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
1826+ sizeof(req), true);
1827+}
1828+
1829+static int
1830+mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy,
1831+ struct ieee80211_he_obss_pd *he_obss_pd)
1832+{
1833+ struct mt7915_dev *dev = phy->dev;
1834+ struct {
1835+ struct mt7915_mcu_sr_ctrl ctrl;
1836+ struct {
1837+ u8 pd_th_non_srg;
1838+ u8 pd_th_srg;
1839+ u8 period_offs;
1840+ u8 rcpi_src;
1841+ __le16 obss_pd_min;
1842+ __le16 obss_pd_min_srg;
1843+ u8 resp_txpwr_mode;
1844+ u8 txpwr_restrict_mode;
1845+ u8 txpwr_ref;
1846+ u8 rsv[3];
1847+ } __packed param;
1848+ } __packed req = {
1849+ .ctrl = {
1850+ .action = SPR_SET_PARAM,
1851+ .argnum = 9,
1852+ .band_idx = phy->band_idx,
1853+ },
1854+ };
1855+ int ret;
1856+ u8 max_th = 82, non_srg_max_th = 62;
1857+
1858+ /* disable firmware dynamic PD adjustment */
1859+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false);
1860+ if (ret)
1861+ return ret;
1862+
1863+ if (he_obss_pd->sr_ctrl &
1864+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)
1865+ req.param.pd_th_non_srg = max_th;
1866+ else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
1867+ req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset;
1868+ else
1869+ req.param.pd_th_non_srg = non_srg_max_th;
1870+
1871+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
1872+ req.param.pd_th_srg = max_th - he_obss_pd->max_offset;
1873+
1874+ req.param.obss_pd_min = 82;
1875+ req.param.obss_pd_min_srg = 82;
1876+ req.param.txpwr_restrict_mode = 2;
1877+ req.param.txpwr_ref = 21;
1878+
1879+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
1880+ sizeof(req), true);
1881+}
1882+
1883+static int
1884+mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif,
1885+ struct ieee80211_he_obss_pd *he_obss_pd)
1886 {
1887-#define MT_SPR_ENABLE 1
1888 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1889+ struct mt7915_dev *dev = phy->dev;
1890+ u8 omac = mvif->mt76.omac_idx;
1891 struct {
1892- u8 action;
1893- u8 arg_num;
1894- u8 band_idx;
1895- u8 status;
1896- u8 drop_tx_idx;
1897- u8 sta_idx; /* 256 sta */
1898- u8 rsv[2];
1899- __le32 val;
1900+ struct mt7915_mcu_sr_ctrl ctrl;
1901+ struct {
1902+ u8 omac;
1903+ u8 rsv[3];
1904+ u8 flag[20];
1905+ } __packed siga;
1906 } __packed req = {
1907- .action = MT_SPR_ENABLE,
1908- .arg_num = 1,
1909- .band_idx = mvif->mt76.band_idx,
1910- .val = cpu_to_le32(enable),
1911+ .ctrl = {
1912+ .action = SPR_SET_SIGA,
1913+ .argnum = 1,
1914+ .band_idx = phy->band_idx,
1915+ },
1916+ .siga = {
1917+ .omac = omac > HW_BSSID_MAX ? omac - 12 : omac,
1918+ },
1919 };
1920+ int ret;
1921+
1922+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED)
1923+ req.siga.flag[req.siga.omac] = 0xf;
1924+ else
1925+ return 0;
1926+
1927+ /* switch to normal AP mode */
1928+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0);
1929+ if (ret)
1930+ return ret;
1931
1932 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
1933 sizeof(req), true);
1934 }
1935
1936+static int
1937+mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy,
1938+ struct ieee80211_he_obss_pd *he_obss_pd)
1939+{
1940+ struct mt7915_dev *dev = phy->dev;
1941+ struct {
1942+ struct mt7915_mcu_sr_ctrl ctrl;
1943+ struct {
1944+ __le32 color_l[2];
1945+ __le32 color_h[2];
1946+ __le32 bssid_l[2];
1947+ __le32 bssid_h[2];
1948+ } __packed bitmap;
1949+ } __packed req = {
1950+ .ctrl = {
1951+ .action = SPR_SET_SRG_BITMAP,
1952+ .argnum = 4,
1953+ .band_idx = phy->band_idx,
1954+ },
1955+ };
1956+ u32 bitmap;
1957+
1958+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
1959+ req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
1960+
1961+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap));
1962+ req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
1963+
1964+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
1965+ req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
1966+
1967+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap));
1968+ req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
1969+
1970+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
1971+ sizeof(req), true);
1972+}
1973+
1974+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
1975+ struct ieee80211_he_obss_pd *he_obss_pd)
1976+{
1977+ int ret;
1978+
1979+ /* enable firmware scene detection algorithms */
1980+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect);
1981+ if (ret)
1982+ return ret;
1983+
1984+ /* enable spatial reuse */
1985+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable);
1986+ if (ret)
1987+ return ret;
1988+
1989+ if (!he_obss_pd->enable)
1990+ return 0;
1991+
1992+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true);
1993+ if (ret)
1994+ return ret;
1995+
1996+ /* firmware dynamically adjusts PD threshold so skip manual control */
1997+ if (sr_scene_detect)
1998+ return 0;
1999+
2000+ /* set SRG/non-SRG OBSS PD threshold */
2001+ ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd);
2002+ if (ret)
2003+ return ret;
2004+
2005+ /* set SR prohibit */
2006+ ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
2007+ if (ret)
2008+ return ret;
2009+
2010+ /* set SRG BSS color/BSSID bitmap */
2011+ return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
2012+}
2013+
2014 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
2015 struct ieee80211_sta *sta, struct rate_info *rate)
2016 {
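
For readers unfamiliar with the OBSS PD encoding used by mt7915_mcu_set_obss_spr_pd() above, a small worked example of the threshold arithmetic follows. The firmware fields appear to carry the magnitude of the packet-detect level in dBm (82 ~ -82 dBm); that reading, and the pd_th_non_srg() demo helper, are illustrations only and not driver code.

#include <stdbool.h>
#include <stdio.h>

/* mirrors the branch structure of mt7915_mcu_set_obss_spr_pd() */
static unsigned int pd_th_non_srg(bool sr_disallowed, bool offset_present,
				  unsigned int non_srg_max_offset)
{
	unsigned int max_th = 82, non_srg_max_th = 62;

	if (sr_disallowed)
		return max_th;			/* non-SRG spatial reuse disallowed */
	if (offset_present)
		return max_th - non_srg_max_offset;	/* -82 dBm + offset */
	return non_srg_max_th;			/* default -62 dBm */
}

int main(void)
{
	/* e.g. a 10 dB non-SRG offset gives a -72 dBm PD threshold */
	printf("-%u dBm\n", pd_th_non_srg(false, true, 10));
	return 0;
}
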
2017diff --git a/mt7915/mcu.h b/mt7915/mcu.h
2018index c19b5d66..46c517e5 100644
2019--- a/mt7915/mcu.h
2020+++ b/mt7915/mcu.h
2021@@ -129,6 +129,17 @@ struct mt7915_mcu_background_chain_ctrl {
2022 u8 rsv[2];
2023 } __packed;
2024
2025+struct mt7915_mcu_sr_ctrl {
2026+ u8 action;
2027+ u8 argnum;
2028+ u8 band_idx;
2029+ u8 status;
2030+ u8 drop_ta_idx;
2031+ u8 sta_idx; /* 256 sta */
2032+ u8 rsv[2];
2033+ __le32 val;
2034+} __packed;
2035+
2036 struct mt7915_mcu_eeprom {
2037 u8 buffer_mode;
2038 u8 format;
2039@@ -408,6 +419,25 @@ enum {
2040 #define RATE_CFG_PHY_TYPE GENMASK(27, 24)
2041 #define RATE_CFG_HE_LTF GENMASK(31, 28)
2042
2043+enum {
2044+ TX_POWER_LIMIT_ENABLE,
2045+ TX_POWER_LIMIT_TABLE = 0x4,
2046+ TX_POWER_LIMIT_INFO = 0x7,
2047+ TX_POWER_LIMIT_FRAME = 0x11,
2048+ TX_POWER_LIMIT_FRAME_MIN = 0x12,
2049+};
2050+
2051+enum {
2052+ SPR_ENABLE = 0x1,
2053+ SPR_ENABLE_SD = 0x3,
2054+ SPR_ENABLE_MODE = 0x5,
2055+ SPR_ENABLE_DPD = 0x23,
2056+ SPR_ENABLE_TX = 0x25,
2057+ SPR_SET_SRG_BITMAP = 0x80,
2058+ SPR_SET_PARAM = 0xc2,
2059+ SPR_SET_SIGA = 0xdc,
2060+};
2061+
2062 enum {
2063 THERMAL_PROTECT_PARAMETER_CTRL,
2064 THERMAL_PROTECT_BASIC_INFO,
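
The mt7915_mcu_sr_ctrl header added here preserves the layout of the inline struct it replaces in mt7915_mcu_add_obss_spr(): eight single-byte fields followed by a 32-bit value, so the per-action parameter blocks built in mcu.c start 12 bytes into the message. A quick host-side layout check, using plain stand-in types rather than the kernel definitions:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* stand-in for struct mt7915_mcu_sr_ctrl (host types, for illustration) */
struct sr_ctrl {
	uint8_t action;
	uint8_t argnum;
	uint8_t band_idx;
	uint8_t status;
	uint8_t drop_ta_idx;
	uint8_t sta_idx;	/* 256 sta */
	uint8_t rsv[2];
	uint32_t val;
} __attribute__((packed));

int main(void)
{
	/* prints: header size 12, val at offset 8 */
	printf("header size %zu, val at offset %zu\n",
	       sizeof(struct sr_ctrl), offsetof(struct sr_ctrl, val));
	return 0;
}
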
2065diff --git a/mt7915/mmio.c b/mt7915/mmio.c
2066index 3c840853..3b4ede3b 100644
2067--- a/mt7915/mmio.c
2068+++ b/mt7915/mmio.c
2069@@ -9,107 +9,112 @@
2070 #include "mt7915.h"
2071 #include "mac.h"
2072 #include "../trace.h"
2073+#include "../dma.h"
2074
2075 static bool wed_enable;
2076 module_param(wed_enable, bool, 0644);
2077+MODULE_PARM_DESC(wed_enable, "Enable Wireless Ethernet Dispatch support");
2078
2079 static const u32 mt7915_reg[] = {
2080- [INT_SOURCE_CSR] = 0xd7010,
2081- [INT_MASK_CSR] = 0xd7014,
2082- [INT1_SOURCE_CSR] = 0xd7088,
2083- [INT1_MASK_CSR] = 0xd708c,
2084- [INT_MCU_CMD_SOURCE] = 0xd51f0,
2085- [INT_MCU_CMD_EVENT] = 0x3108,
2086- [WFDMA0_ADDR] = 0xd4000,
2087- [WFDMA0_PCIE1_ADDR] = 0xd8000,
2088- [WFDMA_EXT_CSR_ADDR] = 0xd7000,
2089- [CBTOP1_PHY_END] = 0x77ffffff,
2090- [INFRA_MCU_ADDR_END] = 0x7c3fffff,
2091- [FW_ASSERT_STAT_ADDR] = 0x219848,
2092- [FW_EXCEPT_TYPE_ADDR] = 0x21987c,
2093- [FW_EXCEPT_COUNT_ADDR] = 0x219848,
2094- [FW_CIRQ_COUNT_ADDR] = 0x216f94,
2095- [FW_CIRQ_IDX_ADDR] = 0x216ef8,
2096- [FW_CIRQ_LISR_ADDR] = 0x2170ac,
2097- [FW_TASK_ID_ADDR] = 0x216f90,
2098- [FW_TASK_IDX_ADDR] = 0x216f9c,
2099- [FW_TASK_QID1_ADDR] = 0x219680,
2100- [FW_TASK_QID2_ADDR] = 0x219760,
2101- [FW_TASK_START_ADDR] = 0x219558,
2102- [FW_TASK_END_ADDR] = 0x219554,
2103- [FW_TASK_SIZE_ADDR] = 0x219560,
2104- [FW_LAST_MSG_ID_ADDR] = 0x216f70,
2105- [FW_EINT_INFO_ADDR] = 0x219818,
2106- [FW_SCHED_INFO_ADDR] = 0x219828,
2107- [SWDEF_BASE_ADDR] = 0x41f200,
2108- [TXQ_WED_RING_BASE] = 0xd7300,
2109- [RXQ_WED_RING_BASE] = 0xd7410,
2110+ [INT_SOURCE_CSR] = 0xd7010,
2111+ [INT_MASK_CSR] = 0xd7014,
2112+ [INT1_SOURCE_CSR] = 0xd7088,
2113+ [INT1_MASK_CSR] = 0xd708c,
2114+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
2115+ [INT_MCU_CMD_EVENT] = 0x3108,
2116+ [WFDMA0_ADDR] = 0xd4000,
2117+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
2118+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
2119+ [CBTOP1_PHY_END] = 0x77ffffff,
2120+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
2121+ [FW_ASSERT_STAT_ADDR] = 0x219848,
2122+ [FW_EXCEPT_TYPE_ADDR] = 0x21987c,
2123+ [FW_EXCEPT_COUNT_ADDR] = 0x219848,
2124+ [FW_CIRQ_COUNT_ADDR] = 0x216f94,
2125+ [FW_CIRQ_IDX_ADDR] = 0x216ef8,
2126+ [FW_CIRQ_LISR_ADDR] = 0x2170ac,
2127+ [FW_TASK_ID_ADDR] = 0x216f90,
2128+ [FW_TASK_IDX_ADDR] = 0x216f9c,
2129+ [FW_TASK_QID1_ADDR] = 0x219680,
2130+ [FW_TASK_QID2_ADDR] = 0x219760,
2131+ [FW_TASK_START_ADDR] = 0x219558,
2132+ [FW_TASK_END_ADDR] = 0x219554,
2133+ [FW_TASK_SIZE_ADDR] = 0x219560,
2134+ [FW_LAST_MSG_ID_ADDR] = 0x216f70,
2135+ [FW_EINT_INFO_ADDR] = 0x219818,
2136+ [FW_SCHED_INFO_ADDR] = 0x219828,
2137+ [SWDEF_BASE_ADDR] = 0x41f200,
2138+ [TXQ_WED_RING_BASE] = 0xd7300,
2139+ [RXQ_WED_RING_BASE] = 0xd7410,
2140+ [RXQ_WED_DATA_RING_BASE] = 0xd4500,
2141 };
2142
2143 static const u32 mt7916_reg[] = {
2144- [INT_SOURCE_CSR] = 0xd4200,
2145- [INT_MASK_CSR] = 0xd4204,
2146- [INT1_SOURCE_CSR] = 0xd8200,
2147- [INT1_MASK_CSR] = 0xd8204,
2148- [INT_MCU_CMD_SOURCE] = 0xd41f0,
2149- [INT_MCU_CMD_EVENT] = 0x2108,
2150- [WFDMA0_ADDR] = 0xd4000,
2151- [WFDMA0_PCIE1_ADDR] = 0xd8000,
2152- [WFDMA_EXT_CSR_ADDR] = 0xd7000,
2153- [CBTOP1_PHY_END] = 0x7fffffff,
2154- [INFRA_MCU_ADDR_END] = 0x7c085fff,
2155- [FW_ASSERT_STAT_ADDR] = 0x02204c14,
2156- [FW_EXCEPT_TYPE_ADDR] = 0x022051a4,
2157- [FW_EXCEPT_COUNT_ADDR] = 0x022050bc,
2158- [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
2159- [FW_CIRQ_IDX_ADDR] = 0x02204f84,
2160- [FW_CIRQ_LISR_ADDR] = 0x022050d0,
2161- [FW_TASK_ID_ADDR] = 0x0220406c,
2162- [FW_TASK_IDX_ADDR] = 0x0220500c,
2163- [FW_TASK_QID1_ADDR] = 0x022028c8,
2164- [FW_TASK_QID2_ADDR] = 0x02202a38,
2165- [FW_TASK_START_ADDR] = 0x0220286c,
2166- [FW_TASK_END_ADDR] = 0x02202870,
2167- [FW_TASK_SIZE_ADDR] = 0x02202878,
2168- [FW_LAST_MSG_ID_ADDR] = 0x02204fe8,
2169- [FW_EINT_INFO_ADDR] = 0x0220525c,
2170- [FW_SCHED_INFO_ADDR] = 0x0220516c,
2171- [SWDEF_BASE_ADDR] = 0x411400,
2172- [TXQ_WED_RING_BASE] = 0xd7300,
2173- [RXQ_WED_RING_BASE] = 0xd7410,
2174+ [INT_SOURCE_CSR] = 0xd4200,
2175+ [INT_MASK_CSR] = 0xd4204,
2176+ [INT1_SOURCE_CSR] = 0xd8200,
2177+ [INT1_MASK_CSR] = 0xd8204,
2178+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
2179+ [INT_MCU_CMD_EVENT] = 0x2108,
2180+ [WFDMA0_ADDR] = 0xd4000,
2181+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
2182+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
2183+ [CBTOP1_PHY_END] = 0x7fffffff,
2184+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
2185+ [FW_ASSERT_STAT_ADDR] = 0x02204c14,
2186+ [FW_EXCEPT_TYPE_ADDR] = 0x022051a4,
2187+ [FW_EXCEPT_COUNT_ADDR] = 0x022050bc,
2188+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
2189+ [FW_CIRQ_IDX_ADDR] = 0x02204f84,
2190+ [FW_CIRQ_LISR_ADDR] = 0x022050d0,
2191+ [FW_TASK_ID_ADDR] = 0x0220406c,
2192+ [FW_TASK_IDX_ADDR] = 0x0220500c,
2193+ [FW_TASK_QID1_ADDR] = 0x022028c8,
2194+ [FW_TASK_QID2_ADDR] = 0x02202a38,
2195+ [FW_TASK_START_ADDR] = 0x0220286c,
2196+ [FW_TASK_END_ADDR] = 0x02202870,
2197+ [FW_TASK_SIZE_ADDR] = 0x02202878,
2198+ [FW_LAST_MSG_ID_ADDR] = 0x02204fe8,
2199+ [FW_EINT_INFO_ADDR] = 0x0220525c,
2200+ [FW_SCHED_INFO_ADDR] = 0x0220516c,
2201+ [SWDEF_BASE_ADDR] = 0x411400,
2202+ [TXQ_WED_RING_BASE] = 0xd7300,
2203+ [RXQ_WED_RING_BASE] = 0xd7410,
2204+ [RXQ_WED_DATA_RING_BASE] = 0xd4540,
2205 };
2206
2207 static const u32 mt7986_reg[] = {
2208- [INT_SOURCE_CSR] = 0x24200,
2209- [INT_MASK_CSR] = 0x24204,
2210- [INT1_SOURCE_CSR] = 0x28200,
2211- [INT1_MASK_CSR] = 0x28204,
2212- [INT_MCU_CMD_SOURCE] = 0x241f0,
2213- [INT_MCU_CMD_EVENT] = 0x54000108,
2214- [WFDMA0_ADDR] = 0x24000,
2215- [WFDMA0_PCIE1_ADDR] = 0x28000,
2216- [WFDMA_EXT_CSR_ADDR] = 0x27000,
2217- [CBTOP1_PHY_END] = 0x7fffffff,
2218- [INFRA_MCU_ADDR_END] = 0x7c085fff,
2219- [FW_ASSERT_STAT_ADDR] = 0x02204b54,
2220- [FW_EXCEPT_TYPE_ADDR] = 0x022050dc,
2221- [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc,
2222- [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
2223- [FW_CIRQ_IDX_ADDR] = 0x02204ec4,
2224- [FW_CIRQ_LISR_ADDR] = 0x02205010,
2225- [FW_TASK_ID_ADDR] = 0x02204fac,
2226- [FW_TASK_IDX_ADDR] = 0x02204f4c,
2227- [FW_TASK_QID1_ADDR] = 0x02202814,
2228- [FW_TASK_QID2_ADDR] = 0x02202984,
2229- [FW_TASK_START_ADDR] = 0x022027b8,
2230- [FW_TASK_END_ADDR] = 0x022027bc,
2231- [FW_TASK_SIZE_ADDR] = 0x022027c4,
2232- [FW_LAST_MSG_ID_ADDR] = 0x02204f28,
2233- [FW_EINT_INFO_ADDR] = 0x02205194,
2234- [FW_SCHED_INFO_ADDR] = 0x022051a4,
2235- [SWDEF_BASE_ADDR] = 0x411400,
2236- [TXQ_WED_RING_BASE] = 0x24420,
2237- [RXQ_WED_RING_BASE] = 0x24520,
2238+ [INT_SOURCE_CSR] = 0x24200,
2239+ [INT_MASK_CSR] = 0x24204,
2240+ [INT1_SOURCE_CSR] = 0x28200,
2241+ [INT1_MASK_CSR] = 0x28204,
2242+ [INT_MCU_CMD_SOURCE] = 0x241f0,
2243+ [INT_MCU_CMD_EVENT] = 0x54000108,
2244+ [WFDMA0_ADDR] = 0x24000,
2245+ [WFDMA0_PCIE1_ADDR] = 0x28000,
2246+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
2247+ [CBTOP1_PHY_END] = 0x7fffffff,
2248+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
2249+ [FW_ASSERT_STAT_ADDR] = 0x02204b54,
2250+ [FW_EXCEPT_TYPE_ADDR] = 0x022050dc,
2251+ [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc,
2252+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
2253+ [FW_CIRQ_IDX_ADDR] = 0x02204ec4,
2254+ [FW_CIRQ_LISR_ADDR] = 0x02205010,
2255+ [FW_TASK_ID_ADDR] = 0x02204fac,
2256+ [FW_TASK_IDX_ADDR] = 0x02204f4c,
2257+ [FW_TASK_QID1_ADDR] = 0x02202814,
2258+ [FW_TASK_QID2_ADDR] = 0x02202984,
2259+ [FW_TASK_START_ADDR] = 0x022027b8,
2260+ [FW_TASK_END_ADDR] = 0x022027bc,
2261+ [FW_TASK_SIZE_ADDR] = 0x022027c4,
2262+ [FW_LAST_MSG_ID_ADDR] = 0x02204f28,
2263+ [FW_EINT_INFO_ADDR] = 0x02205194,
2264+ [FW_SCHED_INFO_ADDR] = 0x022051a4,
2265+ [SWDEF_BASE_ADDR] = 0x411400,
2266+ [TXQ_WED_RING_BASE] = 0x24420,
2267+ [RXQ_WED_RING_BASE] = 0x24520,
2268+ [RXQ_WED_DATA_RING_BASE] = 0x24540,
2269 };
2270
2271 static const u32 mt7915_offs[] = {
2272@@ -585,6 +590,105 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
2273 mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
2274 MT_AGG_ACR_PPDU_TXS2H);
2275 }
2276+
2277+static void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
2278+{
2279+ struct mt7915_dev *dev;
2280+ struct page *page;
2281+ int i;
2282+
2283+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
2284+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
2285+ struct mt76_txwi_cache *t;
2286+
2287+ t = mt76_rx_token_release(&dev->mt76, i);
2288+ if (!t || !t->ptr)
2289+ continue;
2290+
2291+ dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
2292+ wed->wlan.rx_size, DMA_FROM_DEVICE);
2293+ skb_free_frag(t->ptr);
2294+ t->ptr = NULL;
2295+
2296+ mt76_put_rxwi(&dev->mt76, t);
2297+ }
2298+
2299+ if (!wed->rx_buf_ring.rx_page.va)
2300+ return;
2301+
2302+ page = virt_to_page(wed->rx_buf_ring.rx_page.va);
2303+ __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
2304+ memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
2305+}
2306+
2307+static u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
2308+{
2309+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
2310+ struct mt7915_dev *dev;
2311+ u32 length;
2312+ int i;
2313+
2314+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
2315+ length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
2316+ sizeof(struct skb_shared_info));
2317+
2318+ for (i = 0; i < size; i++) {
2319+ struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
2320+ dma_addr_t phy_addr;
2321+ int token;
2322+ void *ptr;
2323+
2324+ ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
2325+ GFP_KERNEL);
2326+ if (!ptr)
2327+ goto unmap;
2328+
2329+ phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
2330+ wed->wlan.rx_size,
2331+ DMA_TO_DEVICE);
2332+ if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
2333+ skb_free_frag(ptr);
2334+ goto unmap;
2335+ }
2336+
2337+ desc->buf0 = cpu_to_le32(phy_addr);
2338+ token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
2339+ desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
2340+ token));
2341+ desc++;
2342+ }
2343+
2344+ return 0;
2345+
2346+unmap:
2347+ mt7915_wed_release_rx_buf(wed);
2348+ return -ENOMEM;
2349+}
2350+
2351+static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
2352+ struct mtk_wed_wo_rx_stats *stats)
2353+{
2354+ int idx = le16_to_cpu(stats->wlan_idx);
2355+ struct mt7915_dev *dev;
2356+ struct mt76_wcid *wcid;
2357+
2358+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
2359+
2360+ if (idx >= mt7915_wtbl_size(dev))
2361+ return;
2362+
2363+ rcu_read_lock();
2364+
2365+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
2366+ if (wcid) {
2367+ wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt);
2368+ wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt);
2369+ wcid->stats.rx_errors += le32_to_cpu(stats->rx_err_cnt);
2370+ wcid->stats.rx_drops += le32_to_cpu(stats->rx_drop_cnt);
2371+ }
2372+
2373+ rcu_read_unlock();
2374+}
2375 #endif
2376
2377 int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
2378@@ -602,6 +706,10 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
2379
2380 wed->wlan.pci_dev = pci_dev;
2381 wed->wlan.bus_type = MTK_WED_BUS_PCIE;
2382+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
2383+ pci_resource_start(pci_dev, 0),
2384+ pci_resource_len(pci_dev, 0));
2385+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
2386 wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
2387 MT_INT_WED_SOURCE_CSR;
2388 wed->wlan.wpdma_mask = pci_resource_start(pci_dev, 0) +
2389@@ -612,6 +720,10 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
2390 MT_TXQ_WED_RING_BASE;
2391 wed->wlan.wpdma_txfree = pci_resource_start(pci_dev, 0) +
2392 MT_RXQ_WED_RING_BASE;
2393+ wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
2394+ MT_WPDMA_GLO_CFG;
2395+ wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
2396+ MT_RXQ_WED_DATA_RING_BASE;
2397 } else {
2398 struct platform_device *plat_dev = pdev_ptr;
2399 struct resource *res;
2400@@ -622,19 +734,45 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
2401
2402 wed->wlan.platform_dev = plat_dev;
2403 wed->wlan.bus_type = MTK_WED_BUS_AXI;
2404+ wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
2405+ resource_size(res));
2406+ wed->wlan.phy_base = res->start;
2407 wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
2408 wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
2409 wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
2410 wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
2411+ wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
2412+ wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
2413 }
2414 wed->wlan.nbuf = 4096;
2415 wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
2416 wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
2417- wed->wlan.txfree_tbit = is_mt7915(&dev->mt76) ? 1 : 2;
2418+ wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1;
2419 wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
2420+ wed->wlan.wcid_512 = !is_mt7915(&dev->mt76);
2421+
2422+ wed->wlan.rx_nbuf = 65536;
2423+ wed->wlan.rx_npkt = MT7915_WED_RX_TOKEN_SIZE;
2424+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
2425+ if (is_mt7915(&dev->mt76)) {
2426+ wed->wlan.rx_tbit[0] = 16;
2427+ wed->wlan.rx_tbit[1] = 17;
2428+ } else if (is_mt7986(&dev->mt76)) {
2429+ wed->wlan.rx_tbit[0] = 22;
2430+ wed->wlan.rx_tbit[1] = 23;
2431+ } else {
2432+ wed->wlan.rx_tbit[0] = 18;
2433+ wed->wlan.rx_tbit[1] = 19;
2434+ }
2435+
2436 wed->wlan.init_buf = mt7915_wed_init_buf;
2437 wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
2438 wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
2439+ wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
2440+ wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
2441+ wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
2442+
2443+ dev->mt76.rx_token_size = wed->wlan.rx_npkt;
2444
2445 if (mtk_wed_device_attach(wed))
2446 return 0;
2447diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
2448index 9cb680e7..42f21343 100644
2449--- a/mt7915/mt7915.h
2450+++ b/mt7915/mt7915.h
2451@@ -68,6 +68,8 @@
2452 #define MT7915_MIN_TWT_DUR 64
2453 #define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
2454
2455+#define MT7915_WED_RX_TOKEN_SIZE 12288
2456+
2457 struct mt7915_vif;
2458 struct mt7915_sta;
2459 struct mt7915_dfs_pulse;
2460@@ -501,8 +503,8 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
2461 struct cfg80211_he_bss_color *he_bss_color);
2462 int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2463 int enable, u32 changed);
2464-int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
2465- bool enable);
2466+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
2467+ struct ieee80211_he_obss_pd *he_obss_pd);
2468 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
2469 struct ieee80211_sta *sta, bool changed);
2470 int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
2471@@ -526,6 +528,10 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
2472 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
2473 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
2474 int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
2475+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower);
2476+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
2477+ struct ieee80211_vif *vif,
2478+ struct ieee80211_sta *sta, s8 txpower);
2479 int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
2480 int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
2481 int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
2482@@ -617,7 +623,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
2483 struct mt76_tx_info *tx_info);
2484 void mt7915_tx_token_put(struct mt7915_dev *dev);
2485 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
2486- struct sk_buff *skb);
2487+ struct sk_buff *skb, u32 *info);
2488 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
2489 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
2490 void mt7915_stats_work(struct work_struct *work);
2491@@ -628,6 +634,7 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
2492 void mt7915_update_channel(struct mt76_phy *mphy);
2493 int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
2494 int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
2495+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev);
2496 int mt7915_init_debugfs(struct mt7915_phy *phy);
2497 void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
2498 bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
2499diff --git a/mt7915/regs.h b/mt7915/regs.h
2500index 0c61f125..aca1b2f1 100644
2501--- a/mt7915/regs.h
2502+++ b/mt7915/regs.h
2503@@ -43,6 +43,7 @@ enum reg_rev {
2504 SWDEF_BASE_ADDR,
2505 TXQ_WED_RING_BASE,
2506 RXQ_WED_RING_BASE,
2507+ RXQ_WED_DATA_RING_BASE,
2508 __MT_REG_MAX,
2509 };
2510
2511@@ -588,9 +589,14 @@ enum offs_rev {
2512 #define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
2513
2514 #define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
2515+
2516+#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
2517+#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
2518+
2519 #define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
2520 #define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
2521 #define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
2522+#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
2523
2524 /* WFDMA1 */
2525 #define MT_WFDMA1_BASE 0xd5000
2526@@ -686,6 +692,7 @@ enum offs_rev {
2527
2528 #define MT_TXQ_WED_RING_BASE __REG(TXQ_WED_RING_BASE)
2529 #define MT_RXQ_WED_RING_BASE __REG(RXQ_WED_RING_BASE)
2530+#define MT_RXQ_WED_DATA_RING_BASE __REG(RXQ_WED_DATA_RING_BASE)
2531
2532 #define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
2533 #define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
2534@@ -1179,6 +1186,10 @@ enum offs_rev {
2535 #define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
2536 #define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
2537
2538+#define MT_WF_PHY_TPC_CTRL_STAT(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 16))
2539+#define MT_WF_PHY_TPC_CTRL_STAT_MT7916(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 20))
2540+#define MT_WF_PHY_TPC_POWER GENMASK(15, 8)
2541+
2542 #define MT_MCU_WM_CIRQ_BASE 0x89010000
2543 #define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
2544 #define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80)
2545diff --git a/mt7915/testmode.c b/mt7915/testmode.c
2546index a979460f..7ace05e0 100644
2547--- a/mt7915/testmode.c
2548+++ b/mt7915/testmode.c
2549@@ -44,14 +44,14 @@ mt7915_tm_set_tx_power(struct mt7915_phy *phy)
2550 int ret;
2551 struct {
2552 u8 format_id;
2553- u8 dbdc_idx;
2554+ u8 band_idx;
2555 s8 tx_power;
2556 u8 ant_idx; /* Only 0 is valid */
2557 u8 center_chan;
2558 u8 rsv[3];
2559 } __packed req = {
2560 .format_id = 0xf,
2561- .dbdc_idx = phy != &dev->phy,
2562+ .band_idx = phy->band_idx,
2563 .center_chan = ieee80211_frequency_to_channel(freq),
2564 };
2565 u8 *tx_power = NULL;
2566@@ -77,7 +77,7 @@ mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val)
2567 struct mt7915_tm_cmd req = {
2568 .testmode_en = en,
2569 .param_idx = MCU_ATE_SET_FREQ_OFFSET,
2570- .param.freq.band = phy != &dev->phy,
2571+ .param.freq.band = phy->band_idx,
2572 .param.freq.freq_offset = cpu_to_le32(val),
2573 };
2574
2575@@ -111,7 +111,7 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
2576 .param_idx = MCU_ATE_SET_TRX,
2577 .param.trx.type = type,
2578 .param.trx.enable = en,
2579- .param.trx.band = phy != &dev->phy,
2580+ .param.trx.band = phy->band_idx,
2581 };
2582
2583 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
2584@@ -126,7 +126,7 @@ mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
2585 .testmode_en = 1,
2586 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
2587 .param.clean.wcid = wcid,
2588- .param.clean.band = phy != &dev->phy,
2589+ .param.clean.band = phy->band_idx,
2590 };
2591
2592 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
2593@@ -144,7 +144,7 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
2594 .param.slot.sifs = sifs,
2595 .param.slot.rifs = 2,
2596 .param.slot.eifs = cpu_to_le16(60),
2597- .param.slot.band = phy != &dev->phy,
2598+ .param.slot.band = phy->band_idx,
2599 };
2600
2601 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
2602@@ -488,7 +488,7 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
2603 mt7915_tm_update_channel(phy);
2604
2605 /* read-clear */
2606- mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
2607+ mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
2608 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
2609 }
2610 }
2611@@ -526,7 +526,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
2612 tx_cont->control_ch = chandef->chan->hw_value;
2613 tx_cont->center_ch = freq1;
2614 tx_cont->tx_ant = td->tx_antenna_mask;
2615- tx_cont->band = phy != &dev->phy;
2616+ tx_cont->band = phy->band_idx;
2617
2618 switch (chandef->width) {
2619 case NL80211_CHAN_WIDTH_40:
2620@@ -558,7 +558,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
2621 }
2622
2623 if (!en) {
2624- req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
2625+ req.op.rf.param.func_data = cpu_to_le32(phy->band_idx);
2626 goto out;
2627 }
2628
2629diff --git a/mt7921/init.c b/mt7921/init.c
2630index 739d18fc..e42cb6be 100644
2631--- a/mt7921/init.c
2632+++ b/mt7921/init.c
2633@@ -2,6 +2,7 @@
2634 /* Copyright (C) 2020 MediaTek Inc. */
2635
2636 #include <linux/etherdevice.h>
2637+#include <linux/firmware.h>
2638 #include "mt7921.h"
2639 #include "mac.h"
2640 #include "mcu.h"
2641@@ -37,6 +38,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
2642
2643 memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
2644 dev->mt76.region = request->dfs_region;
2645+ dev->country_ie_env = request->country_ie_env;
2646
2647 mt7921_mutex_acquire(dev);
2648 mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
2649@@ -65,12 +67,18 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
2650 hw->sta_data_size = sizeof(struct mt7921_sta);
2651 hw->vif_data_size = sizeof(struct mt7921_vif);
2652
2653+ if (dev->fw_features & MT7921_FW_CAP_CNM)
2654+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2655+ else
2656+ wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
2657+
2658 wiphy->iface_combinations = if_comb;
2659 wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
2660 WIPHY_FLAG_4ADDR_STATION);
2661 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2662 BIT(NL80211_IFTYPE_AP);
2663 wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
2664+ wiphy->max_remain_on_channel_duration = 5000;
2665 wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
2666 wiphy->max_scan_ssids = 4;
2667 wiphy->max_sched_scan_plan_interval =
2668@@ -129,6 +137,58 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
2669 mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
2670 }
2671
2672+u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
2673+{
2674+ struct mt7921_fw_features *features = NULL;
2675+ const struct mt76_connac2_fw_trailer *hdr;
2676+ struct mt7921_realease_info *rel_info;
2677+ const struct firmware *fw;
2678+ int ret, i, offset = 0;
2679+ const u8 *data, *end;
2680+
2681+ ret = request_firmware(&fw, fw_wm, dev);
2682+ if (ret)
2683+ return ret;
2684+
2685+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
2686+ dev_err(dev, "Invalid firmware\n");
2687+ return -EINVAL;
2688+ }
2689+
2690+ data = fw->data;
2691+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
2692+
2693+ for (i = 0; i < hdr->n_region; i++) {
2694+ const struct mt76_connac2_fw_region *region;
2695+
2696+ region = (const void *)((const u8 *)hdr -
2697+ (hdr->n_region - i) * sizeof(*region));
2698+ offset += le32_to_cpu(region->len);
2699+ }
2700+
2701+ data += offset + 16;
2702+ rel_info = (struct mt7921_realease_info *)data;
2703+ data += sizeof(*rel_info);
2704+ end = data + le16_to_cpu(rel_info->len);
2705+
2706+ while (data < end) {
2707+ rel_info = (struct mt7921_realease_info *)data;
2708+ data += sizeof(*rel_info);
2709+
2710+ if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
2711+ features = (struct mt7921_fw_features *)data;
2712+ break;
2713+ }
2714+
2715+ data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
2716+ }
2717+
2718+ release_firmware(fw);
2719+
2720+ return features ? features->data : 0;
2721+}
2722+EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
2723+
2724 int mt7921_mac_init(struct mt7921_dev *dev)
2725 {
2726 int i;
2727@@ -278,6 +338,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
2728 INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
2729 INIT_WORK(&dev->init_work, mt7921_init_work);
2730
2731+ INIT_WORK(&dev->phy.roc_work, mt7921_roc_work);
2732+ timer_setup(&dev->phy.roc_timer, mt7921_roc_timer, 0);
2733+ init_waitqueue_head(&dev->phy.roc_wait);
2734+
2735 dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
2736 dev->pm.stats.last_wake_event = jiffies;
2737 dev->pm.stats.last_doze_event = jiffies;
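
The release-info walk in mt7921_check_offload_capability() above assumes a simple record format: a 4-byte header { le16 len; u8 pad_len; u8 tag; } followed by len payload bytes plus pad_len padding, with the feature bits living in the record tagged MT7921_FW_TAG_FEATURE. A toy, flat version of that walk (the driver additionally nests these records under an outer length record; the buffer contents and find_tag() below are made up for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TAG_FEATURE 4	/* MT7921_FW_TAG_FEATURE */

struct rel_info { uint16_t len; uint8_t pad_len; uint8_t tag; };

static const uint8_t *find_tag(const uint8_t *data, size_t total, uint8_t tag)
{
	const uint8_t *end = data + total;

	while (data + sizeof(struct rel_info) <= end) {
		struct rel_info hdr;

		memcpy(&hdr, data, sizeof(hdr));	/* assumes little-endian host */
		data += sizeof(hdr);
		if (hdr.tag == tag)
			return data;	/* payload of the matching record */
		data += hdr.len + hdr.pad_len;
	}
	return NULL;
}

int main(void)
{
	/* dummy record (tag 9, 2 bytes payload, 2 bytes pad), then the feature
	 * record whose second payload byte carries the capability bits */
	const uint8_t blob[] = { 2, 0, 2, 9, 0xaa, 0xbb, 0, 0,
				 2, 0, 0, TAG_FEATURE, 0, 0x80 };
	const uint8_t *feat = find_tag(blob, sizeof(blob), TAG_FEATURE);

	if (feat)
		printf("feature bits: 0x%02x\n", feat[1]);	/* 0x80 = CNM */
	return 0;
}
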
2738diff --git a/mt7921/mac.c b/mt7921/mac.c
2739index 7b15193c..639614b0 100644
2740--- a/mt7921/mac.c
2741+++ b/mt7921/mac.c
2742@@ -692,7 +692,7 @@ bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
2743 EXPORT_SYMBOL_GPL(mt7921_rx_check);
2744
2745 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
2746- struct sk_buff *skb)
2747+ struct sk_buff *skb, u32 *info)
2748 {
2749 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
2750 __le32 *rxd = (__le32 *)skb->data;
2751diff --git a/mt7921/main.c b/mt7921/main.c
2752index 00085b12..1b7219e3 100644
2753--- a/mt7921/main.c
2754+++ b/mt7921/main.c
2755@@ -385,6 +385,116 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
2756 mt76_packet_id_flush(&dev->mt76, &msta->wcid);
2757 }
2758
2759+static void mt7921_roc_iter(void *priv, u8 *mac,
2760+ struct ieee80211_vif *vif)
2761+{
2762+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
2763+ struct mt7921_phy *phy = priv;
2764+
2765+ mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
2766+}
2767+
2768+void mt7921_roc_work(struct work_struct *work)
2769+{
2770+ struct mt7921_phy *phy;
2771+
2772+ phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy,
2773+ roc_work);
2774+
2775+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
2776+ return;
2777+
2778+ mt7921_mutex_acquire(phy->dev);
2779+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
2780+ IEEE80211_IFACE_ITER_RESUME_ALL,
2781+ mt7921_roc_iter, phy);
2782+ mt7921_mutex_release(phy->dev);
2783+ ieee80211_remain_on_channel_expired(phy->mt76->hw);
2784+}
2785+
2786+void mt7921_roc_timer(struct timer_list *timer)
2787+{
2788+ struct mt7921_phy *phy = from_timer(phy, timer, roc_timer);
2789+
2790+ ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
2791+}
2792+
2793+static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
2794+{
2795+ int err;
2796+
2797+ if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
2798+ return 0;
2799+
2800+ del_timer_sync(&phy->roc_timer);
2801+ cancel_work_sync(&phy->roc_work);
2802+ err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
2803+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
2804+
2805+ return err;
2806+}
2807+
2808+static int mt7921_set_roc(struct mt7921_phy *phy,
2809+ struct mt7921_vif *vif,
2810+ struct ieee80211_channel *chan,
2811+ int duration,
2812+ enum mt7921_roc_req type)
2813+{
2814+ int err;
2815+
2816+ if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
2817+ return -EBUSY;
2818+
2819+ phy->roc_grant = false;
2820+
2821+ err = mt7921_mcu_set_roc(phy, vif, chan, duration, type,
2822+ ++phy->roc_token_id);
2823+ if (err < 0) {
2824+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
2825+ goto out;
2826+ }
2827+
2828+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
2829+ mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
2830+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
2831+ err = -ETIMEDOUT;
2832+ }
2833+
2834+out:
2835+ return err;
2836+}
2837+
2838+static int mt7921_remain_on_channel(struct ieee80211_hw *hw,
2839+ struct ieee80211_vif *vif,
2840+ struct ieee80211_channel *chan,
2841+ int duration,
2842+ enum ieee80211_roc_type type)
2843+{
2844+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
2845+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
2846+ int err;
2847+
2848+ mt7921_mutex_acquire(phy->dev);
2849+ err = mt7921_set_roc(phy, mvif, chan, duration, MT7921_ROC_REQ_ROC);
2850+ mt7921_mutex_release(phy->dev);
2851+
2852+ return err;
2853+}
2854+
2855+static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
2856+ struct ieee80211_vif *vif)
2857+{
2858+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
2859+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
2860+ int err;
2861+
2862+ mt7921_mutex_acquire(phy->dev);
2863+ err = mt7921_abort_roc(phy, mvif);
2864+ mt7921_mutex_release(phy->dev);
2865+
2866+ return err;
2867+}
2868+
2869 static int mt7921_set_channel(struct mt7921_phy *phy)
2870 {
2871 struct mt7921_dev *dev = phy->dev;
2872@@ -1503,7 +1613,13 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
2873 int err;
2874
2875 mt7921_mutex_acquire(dev);
2876+ err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2,
2877+ dev->country_ie_env);
2878+ if (err < 0)
2879+ goto out;
2880+
2881 err = mt7921_set_tx_sar_pwr(hw, sar);
2882+out:
2883 mt7921_mutex_release(dev);
2884
2885 return err;
2886@@ -1621,6 +1737,8 @@ const struct ieee80211_ops mt7921_ops = {
2887 #endif /* CONFIG_PM */
2888 .flush = mt7921_flush,
2889 .set_sar_specs = mt7921_set_sar_specs,
2890+ .remain_on_channel = mt7921_remain_on_channel,
2891+ .cancel_remain_on_channel = mt7921_cancel_remain_on_channel,
2892 };
2893 EXPORT_SYMBOL_GPL(mt7921_ops);
2894
2895diff --git a/mt7921/mcu.c b/mt7921/mcu.c
2896index 104da7e1..b7ed744f 100644
2897--- a/mt7921/mcu.c
2898+++ b/mt7921/mcu.c
2899@@ -154,6 +154,29 @@ void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
2900
2901 #endif /* CONFIG_PM */
2902
2903+static void
2904+mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
2905+{
2906+ struct mt7921_roc_grant_tlv *grant;
2907+ struct mt76_connac2_mcu_rxd *rxd;
2908+ int duration;
2909+
2910+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
2911+ grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);
2912+
2913+ /* should never happen */
2914+ WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));
2915+
2916+ if (grant->reqtype == MT7921_ROC_REQ_ROC)
2917+ ieee80211_ready_on_channel(dev->mt76.phy.hw);
2918+
2919+ dev->phy.roc_grant = true;
2920+ wake_up(&dev->phy.roc_wait);
2921+ duration = le32_to_cpu(grant->max_interval);
2922+ mod_timer(&dev->phy.roc_timer,
2923+ round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
2924+}
2925+
2926 static void
2927 mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
2928 {
2929@@ -295,6 +318,7 @@ mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev,
2930
2931 switch (rxd->eid) {
2932 case MCU_UNI_EVENT_ROC:
2933+ mt7921_mcu_uni_roc_event(dev, skb);
2934 break;
2935 default:
2936 break;
2937diff --git a/mt7921/mt7921.h b/mt7921/mt7921.h
2938index d9d78f6b..e915dfce 100644
2939--- a/mt7921/mt7921.h
2940+++ b/mt7921/mt7921.h
2941@@ -32,6 +32,9 @@
2942 #define MT7921_MCU_INIT_RETRY_COUNT 10
2943 #define MT7921_WFSYS_INIT_RETRY_COUNT 2
2944
2945+#define MT7921_FW_TAG_FEATURE 4
2946+#define MT7921_FW_CAP_CNM BIT(7)
2947+
2948 #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
2949 #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
2950
2951@@ -67,6 +70,41 @@ enum mt7921_roc_req {
2952 MT7921_ROC_REQ_NUM
2953 };
2954
2955+enum {
2956+ UNI_EVENT_ROC_GRANT = 0,
2957+ UNI_EVENT_ROC_TAG_NUM
2958+};
2959+
2960+struct mt7921_realease_info {
2961+ __le16 len;
2962+ u8 pad_len;
2963+ u8 tag;
2964+} __packed;
2965+
2966+struct mt7921_fw_features {
2967+ u8 segment;
2968+ u8 data;
2969+ u8 rsv[14];
2970+} __packed;
2971+
2972+struct mt7921_roc_grant_tlv {
2973+ __le16 tag;
2974+ __le16 len;
2975+ u8 bss_idx;
2976+ u8 tokenid;
2977+ u8 status;
2978+ u8 primarychannel;
2979+ u8 rfsco;
2980+ u8 rfband;
2981+ u8 channelwidth;
2982+ u8 centerfreqseg1;
2983+ u8 centerfreqseg2;
2984+ u8 reqtype;
2985+ u8 dbdcband;
2986+ u8 rsv[1];
2987+ __le32 max_interval;
2988+} __packed;
2989+
2990 enum mt7921_sdio_pkt_type {
2991 MT7921_SDIO_TXD,
2992 MT7921_SDIO_DATA,
2993@@ -214,6 +252,12 @@ struct mt7921_phy {
2994 #endif
2995
2996 struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
2997+
2998+ struct work_struct roc_work;
2999+ struct timer_list roc_timer;
3000+ wait_queue_head_t roc_wait;
3001+ u8 roc_token_id;
3002+ bool roc_grant;
3003 };
3004
3005 #define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
3006@@ -250,6 +294,7 @@ struct mt7921_dev {
3007 struct work_struct init_work;
3008
3009 u8 fw_debug;
3010+ u8 fw_features;
3011
3012 struct mt76_connac_pm pm;
3013 struct mt76_connac_coredump coredump;
3014@@ -258,6 +303,8 @@ struct mt7921_dev {
3015 struct work_struct ipv6_ns_work;
3016 /* IPv6 addresses for WoWLAN */
3017 struct sk_buff_head ipv6_ns_list;
3018+
3019+ enum environment_cap country_ie_env;
3020 };
3021
3022 enum {
3023@@ -422,7 +469,7 @@ void mt7921_tx_worker(struct mt76_worker *w);
3024 void mt7921_tx_token_put(struct mt7921_dev *dev);
3025 bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
3026 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
3027- struct sk_buff *skb);
3028+ struct sk_buff *skb, u32 *info);
3029 void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
3030 void mt7921_stats_work(struct work_struct *work);
3031 void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
3032@@ -439,6 +486,8 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
3033 struct ieee80211_ampdu_params *params,
3034 bool enable);
3035 void mt7921_scan_work(struct work_struct *work);
3036+void mt7921_roc_work(struct work_struct *work);
3037+void mt7921_roc_timer(struct timer_list *timer);
3038 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
3039 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
3040 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
3041@@ -527,4 +576,5 @@ int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
3042 enum mt7921_roc_req type, u8 token_id);
3043 int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif,
3044 u8 token_id);
3045+u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm);
3046 #endif
3047diff --git a/mt7921/pci.c b/mt7921/pci.c
3048index 4f34cb9e..fbb06f04 100644
3049--- a/mt7921/pci.c
3050+++ b/mt7921/pci.c
3051@@ -13,10 +13,14 @@
3052 #include "../trace.h"
3053
3054 static const struct pci_device_id mt7921_pci_device_table[] = {
3055- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
3056- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
3057- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) },
3058- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) },
3059+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961),
3060+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
3061+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
3062+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
3063+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
3064+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
3065+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
3066+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
3067 { },
3068 };
3069
3070@@ -253,9 +257,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
3071 .fw_own = mt7921e_mcu_fw_pmctrl,
3072 };
3073
3074+ struct ieee80211_ops *ops;
3075 struct mt76_bus_ops *bus_ops;
3076 struct mt7921_dev *dev;
3077 struct mt76_dev *mdev;
3078+ u8 features;
3079 int ret;
3080
3081 ret = pcim_enable_device(pdev);
3082@@ -279,8 +285,21 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
3083 if (mt7921_disable_aspm)
3084 mt76_pci_disable_aspm(pdev);
3085
3086- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops,
3087- &drv_ops);
3088+ features = mt7921_check_offload_capability(&pdev->dev, (const char *)
3089+ id->driver_data);
3090+ ops = devm_kmemdup(&pdev->dev, &mt7921_ops, sizeof(mt7921_ops),
3091+ GFP_KERNEL);
3092+ if (!ops) {
3093+ ret = -ENOMEM;
3094+ goto err_free_pci_vec;
3095+ }
3096+
3097+ if (!(features & MT7921_FW_CAP_CNM)) {
3098+ ops->remain_on_channel = NULL;
3099+ ops->cancel_remain_on_channel = NULL;
3100+ }
3101+
3102+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops);
3103 if (!mdev) {
3104 ret = -ENOMEM;
3105 goto err_free_pci_vec;
3106@@ -289,8 +308,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
3107 pci_set_drvdata(pdev, mdev);
3108
3109 dev = container_of(mdev, struct mt7921_dev, mt76);
3110+ dev->fw_features = features;
3111 dev->hif_ops = &mt7921_pcie_ops;
3112-
3113 mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
3114 tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
3115
3116diff --git a/mt7921/sdio.c b/mt7921/sdio.c
3117index 031d99d4..f6b35087 100644
3118--- a/mt7921/sdio.c
3119+++ b/mt7921/sdio.c
3120@@ -17,7 +17,8 @@
3121 #include "mcu.h"
3122
3123 static const struct sdio_device_id mt7921s_table[] = {
3124- { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901) },
3125+ { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901),
3126+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
3127 { } /* Terminating entry */
3128 };
3129
3130@@ -122,18 +123,32 @@ static int mt7921s_probe(struct sdio_func *func,
3131 .fw_own = mt7921s_mcu_fw_pmctrl,
3132 };
3133
3134+ struct ieee80211_ops *ops;
3135 struct mt7921_dev *dev;
3136 struct mt76_dev *mdev;
3137+ u8 features;
3138 int ret;
3139
3140- mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
3141- &drv_ops);
3142+ features = mt7921_check_offload_capability(&func->dev, (const char *)
3143+ id->driver_data);
3144+
3145+ ops = devm_kmemdup(&func->dev, &mt7921_ops, sizeof(mt7921_ops),
3146+ GFP_KERNEL);
3147+ if (!ops)
3148+ return -ENOMEM;
3149+
3150+ if (!(features & MT7921_FW_CAP_CNM)) {
3151+ ops->remain_on_channel = NULL;
3152+ ops->cancel_remain_on_channel = NULL;
3153+ }
3154+
3155+ mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops);
3156 if (!mdev)
3157 return -ENOMEM;
3158
3159 dev = container_of(mdev, struct mt7921_dev, mt76);
3160+ dev->fw_features = features;
3161 dev->hif_ops = &mt7921_sdio_ops;
3162-
3163 sdio_set_drvdata(func, dev);
3164
3165 ret = mt76s_init(mdev, func, &mt7921s_ops);
3166diff --git a/mt7921/usb.c b/mt7921/usb.c
3167index 89249f0b..8a49d3de 100644
3168--- a/mt7921/usb.c
3169+++ b/mt7921/usb.c
3170@@ -13,7 +13,8 @@
3171 #include "mac.h"
3172
3173 static const struct usb_device_id mt7921u_device_table[] = {
3174- { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
3175+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff),
3176+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
3177 { },
3178 };
3179
3180@@ -204,13 +205,21 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
3181 struct ieee80211_hw *hw;
3182 struct mt7921_dev *dev;
3183 struct mt76_dev *mdev;
3184+ u8 features;
3185 int ret;
3186
3187+ features = mt7921_check_offload_capability(&usb_intf->dev, (const char *)
3188+ id->driver_info);
3189 ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
3190 GFP_KERNEL);
3191 if (!ops)
3192 return -ENOMEM;
3193
3194+ if (!(features & MT7921_FW_CAP_CNM)) {
3195+ ops->remain_on_channel = NULL;
3196+ ops->cancel_remain_on_channel = NULL;
3197+ }
3198+
3199 ops->stop = mt7921u_stop;
3200
3201 mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
3202@@ -218,6 +227,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
3203 return -ENOMEM;
3204
3205 dev = container_of(mdev, struct mt7921_dev, mt76);
3206+ dev->fw_features = features;
3207 dev->hif_ops = &hif_ops;
3208
3209 udev = usb_get_dev(udev);
3210diff --git a/sdio.c b/sdio.c
3211index 0ec308f9..228bc7d4 100644
3212--- a/sdio.c
3213+++ b/sdio.c
3214@@ -395,7 +395,7 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
3215 if (!e || !e->skb)
3216 break;
3217
3218- dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
3219+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
3220 e->skb = NULL;
3221 nframes++;
3222 }
3223diff --git a/tx.c b/tx.c
3224index 65e2b7c1..c8d78b0a 100644
3225--- a/tx.c
3226+++ b/tx.c
3227@@ -751,6 +751,23 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
3228 }
3229 EXPORT_SYMBOL_GPL(mt76_token_consume);
3230
3231+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
3232+ struct mt76_txwi_cache *t, dma_addr_t phys)
3233+{
3234+ int token;
3235+
3236+ spin_lock_bh(&dev->rx_token_lock);
3237+ token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
3238+ GFP_ATOMIC);
3239+ spin_unlock_bh(&dev->rx_token_lock);
3240+
3241+ t->ptr = ptr;
3242+ t->dma_addr = phys;
3243+
3244+ return token;
3245+}
3246+EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
3247+
3248 struct mt76_txwi_cache *
3249 mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
3250 {
3251@@ -779,3 +796,16 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
3252 return txwi;
3253 }
3254 EXPORT_SYMBOL_GPL(mt76_token_release);
3255+
3256+struct mt76_txwi_cache *
3257+mt76_rx_token_release(struct mt76_dev *dev, int token)
3258+{
3259+ struct mt76_txwi_cache *t;
3260+
3261+ spin_lock_bh(&dev->rx_token_lock);
3262+ t = idr_remove(&dev->rx_token, token);
3263+ spin_unlock_bh(&dev->rx_token_lock);
3264+
3265+ return t;
3266+}
3267+EXPORT_SYMBOL_GPL(mt76_rx_token_release);
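
mt76_rx_token_consume() and mt76_rx_token_release() added above give WED RX a way to park a buffer pointer and its DMA address under a small integer that fits in the hardware descriptor, and to trade that integer back for the buffer on completion. A userspace model of the same bookkeeping, with a fixed array standing in for the idr (nothing here is driver API):

#include <stdio.h>
#include <stdlib.h>

#define TOKEN_MAX 8

struct rx_token { void *ptr; unsigned long dma_addr; int used; };
static struct rx_token tokens[TOKEN_MAX];

static int rx_token_consume(void *ptr, unsigned long dma_addr)
{
	for (int i = 0; i < TOKEN_MAX; i++) {
		if (!tokens[i].used) {
			tokens[i] = (struct rx_token){ ptr, dma_addr, 1 };
			return i;	/* value written into the DMA descriptor */
		}
	}
	return -1;	/* table full */
}

static void *rx_token_release(int token, unsigned long *dma_addr)
{
	if (token < 0 || token >= TOKEN_MAX || !tokens[token].used)
		return NULL;
	tokens[token].used = 0;
	*dma_addr = tokens[token].dma_addr;
	return tokens[token].ptr;
}

int main(void)
{
	unsigned long dma;
	void *buf = malloc(2048);
	int tok = rx_token_consume(buf, 0x1000);

	/* on RX completion, the descriptor token maps back to the buffer */
	printf("token %d -> buf %p\n", tok, rx_token_release(tok, &dma));
	free(buf);
	return 0;
}
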
3268diff --git a/usb.c b/usb.c
3269index 50d07d91..369c27ab 100644
3270--- a/usb.c
3271+++ b/usb.c
3272@@ -547,7 +547,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
3273 len -= data_len;
3274 nsgs++;
3275 }
3276- dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
3277+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
3278
3279 return nsgs;
3280 }
3281--
32822.36.1
3283