From 4709ca02ba0332508ac6885acbc779bdfac3f0be Mon Sep 17 00:00:00 2001
From: mtk27745 <rex.lu@mediatek.com>
Date: Fri, 6 Oct 2023 21:20:25 +0800
Subject: [PATCH] wifi: mt76: wed: sync to wed upstream

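Sync the mt76 WED (Wireless Ethernet Dispatch) code with upstream:

- move the WED device pointer into struct mt76_queue so each queue
  references its own WED instance; mt76_init_queue() and
  mt76_connac_init_tx_queues() gain a wed parameter
- factor the common WED rx buffer init/release and offload
  enable/disable callbacks out of mt7915 into mmio.c
- add WED hardware RRO (receive reordering offload) queue support
- add a second WED instance (wed_hif2) for the second PCIe interface
  of mt7996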
---
 dma.c             | 219 +++++++++++++++++++++++++++------------
 dma.h             |  12 +++
 mac80211.c        |  19 +++-
 mmio.c            |  97 +++++++++++++++++
 mt76.h            | 102 ++++++++++++++++--
 mt7603/dma.c      |   9 +-
 mt7615/dma.c      |   6 +-
 mt76_connac.h     |   3 +-
 mt76_connac_mac.c |   5 +-
 mt76x02_mmio.c    |   5 +-
 mt7915/dma.c      |  18 ++--
 mt7915/main.c     |  16 +--
 mt7915/mmio.c     | 107 +------------------
 mt7921/pci.c      |   2 +-
 mt7925/pci.c      |   2 +-
 mt7996/dma.c      | 258 ++++++++++++++++++++++++++++++++++++++++++----
 mt7996/init.c     | 156 ++++++++++++++++++++++++++--
 mt7996/mac.c      |  72 +++++++++++--
 mt7996/main.c     |  42 ++++++++
 mt7996/mcu.c      |  13 ++-
 mt7996/mmio.c     | 208 +++++++++++++++++++++++++++++++++----
 mt7996/mt7996.h   |  67 +++++++++++-
 mt7996/pci.c      |  72 ++++++++++---
 mt7996/regs.h     |  65 +++++++++++-
 24 files changed, 1276 insertions(+), 299 deletions(-)

diff --git a/dma.c b/dma.c
index 643e18e..dd20271 100644
--- a/dma.c
+++ b/dma.c
@@ -9,11 +9,11 @@
 
 #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
 
-#define Q_READ(_dev, _q, _field) ({					\
+#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
-		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
+		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
					        _offset));		\
	else								\
@@ -21,10 +21,10 @@
	_val;								\
 })
 
-#define Q_WRITE(_dev, _q, _field, _val)	do {				\
+#define Q_WRITE(_q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
-		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
+		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
@@ -33,8 +33,8 @@
 
 #else
 
-#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
-#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)
+#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
+#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)
 
 #endif
 
@@ -188,40 +188,63 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	Q_WRITE(dev, q, desc_base, q->desc_dma);
-	Q_WRITE(dev, q, ring_size, q->ndesc);
-	q->head = Q_READ(dev, q, dma_idx);
+	Q_WRITE(q, desc_base, q->desc_dma);
+	if (q->flags & MT_QFLAG_WED_RRO_EN)
+		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
+	else
+		Q_WRITE(q, ring_size, q->ndesc);
+	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
 }
 
 static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+		       bool reset_idx)
 {
-	int i;
-
	if (!q || !q->ndesc)
		return;
 
-	/* clear descriptors */
-	for (i = 0; i < q->ndesc; i++)
-		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+	if (!mt76_queue_is_wed_rro_ind(q)) {
+		int i;
 
-	Q_WRITE(dev, q, cpu_idx, 0);
-	Q_WRITE(dev, q, dma_idx, 0);
+		/* clear descriptors */
+		for (i = 0; i < q->ndesc; i++)
+			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+	}
+
+	if (reset_idx) {
+		Q_WRITE(q, cpu_idx, 0);
+		Q_WRITE(q, dma_idx, 0);
+	}
	mt76_dma_sync_idx(dev, q);
 }
 
+static void
+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	__mt76_dma_queue_reset(dev, q, true);
+}
+
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
 {
-	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
+	struct mt76_desc *desc;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;
 
+	if (mt76_queue_is_wed_rro_ind(q)) {
+		struct mt76_wed_rro_desc *rro_desc;
+
+		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+		data = &rro_desc[q->head];
+		goto done;
+	}
+
+	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
	if (mt76_queue_is_wed_rx(q)) {
@@ -244,6 +267,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);
 
+done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
@@ -343,7 +367,7 @@ static void
 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
	wmb();
-	Q_WRITE(dev, q, cpu_idx, q->head);
+	Q_WRITE(q, cpu_idx, q->head);
 }
 
 static void
@@ -359,7 +383,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
	if (flush)
		last = -1;
	else
-		last = Q_READ(dev, q, dma_idx);
+		last = Q_READ(q, dma_idx);
 
	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@@ -371,7 +395,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
		}
 
		if (!flush && q->tail == last)
-			last = Q_READ(dev, q, dma_idx);
+			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);
 
@@ -392,10 +416,14 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 {
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
-	void *buf;
+	void *buf = e->buf;
+	u32 ctrl;
 
+	if (mt76_queue_is_wed_rro_ind(q))
+		goto done;
+
+	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
-		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}
@@ -403,6 +431,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
	if (info)
		*info = le32_to_cpu(desc->info);
 
+	if (drop) {
+		*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
+		if (ctrl & MT_DMA_CTL_VER_MASK)
+			*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
+	}
+
	if (mt76_queue_is_wed_rx(q)) {
		u32 buf1 = le32_to_cpu(desc->buf1);
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
@@ -420,23 +454,16 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		t->ptr = NULL;
 
		mt76_put_rxwi(dev, t);
-
-		if (drop) {
-			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
-			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
-					   MT_DMA_CTL_DROP));
-
+		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
-		}
	} else {
-		buf = e->buf;
-		e->buf = NULL;
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
				SKB_WITH_OVERHEAD(q->buf_size),
				page_pool_get_dma_dir(q->page_pool));
	}
 
+done:
+	e->buf = NULL;
	return buf;
 }
 
@@ -450,11 +477,16 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
	if (!q->queued)
		return NULL;
 
-	if (flush)
-		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
-	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+	if (mt76_queue_is_wed_rro_data(q))
		return NULL;
 
+	if (!mt76_queue_is_wed_rro_ind(q)) {
+		if (flush)
+			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+			return NULL;
+	}
+
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
 
@@ -606,11 +638,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
	spin_lock_bh(&q->lock);
 
	while (q->queued < q->ndesc - 1) {
+		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
-		struct mt76_queue_buf qbuf;
		dma_addr_t addr;
		int offset;
-		void *buf;
+		void *buf = NULL;
+
+		if (mt76_queue_is_wed_rro_ind(q))
+			goto done;
 
		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
@@ -621,6 +656,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
 
		qbuf.addr = addr + q->buf_offset;
+done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
@@ -630,7 +666,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		frames++;
	}
 
-	if (frames)
+	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);
 
	spin_unlock_bh(&q->lock);
@@ -641,15 +677,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
-	struct mtk_wed_device *wed = &dev->mmio.wed;
-	int ret, type, ring;
-	u8 flags;
+	int ret = 0, type, ring;
+	u16 flags;
 
	if (!q || !q->ndesc)
		return -EINVAL;
 
	flags = q->flags;
-	if (!mtk_wed_device_active(wed))
+	if (!q->wed || !mtk_wed_device_active(q->wed))
		q->flags &= ~MT_QFLAG_WED;
 
	if (!(q->flags & MT_QFLAG_WED))
@@ -660,29 +695,52 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 
	switch (type) {
	case MT76_WED_Q_TX:
-		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
+		ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
+						   reset);
		if (!ret)
-			q->wed_regs = wed->tx_ring[ring].reg_base;
+			q->wed_regs = q->wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
-		q->flags = flags;
 
-		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
		if (!ret)
-			q->wed_regs = wed->txfree_ring.reg_base;
+			q->wed_regs = q->wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
-		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
+		ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
+						   reset);
		if (!ret)
-			q->wed_regs = wed->rx_ring[ring].reg_base;
+			q->wed_regs = q->wed->rx_ring[ring].reg_base;
+		break;
+	case MT76_WED_RRO_Q_DATA:
+		q->flags &= ~MT_QFLAG_WED;
+		__mt76_dma_queue_reset(dev, q, false);
+		mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+		q->head = q->ndesc - 1;
+		q->queued = q->head;
+		break;
+	case MT76_WED_RRO_Q_MSDU_PG:
+		q->flags &= ~MT_QFLAG_WED;
+		__mt76_dma_queue_reset(dev, q, false);
+		mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+		q->head = q->ndesc - 1;
+		q->queued = q->head;
+		break;
+	case MT76_WED_RRO_Q_IND:
+		q->flags &= ~MT_QFLAG_WED;
+		mt76_dma_queue_reset(dev, q);
+		mt76_dma_rx_fill(dev, q, false);
+		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
		break;
	default:
		ret = -EINVAL;
+		break;
	}
+	q->flags = flags;
 
	return ret;
 #else
@@ -706,11 +764,26 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
	q->buf_size = bufsize;
	q->hw_idx = idx;
 
-	size = q->ndesc * sizeof(struct mt76_desc);
-	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
+					    : sizeof(struct mt76_desc);
+	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
+				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;
 
+	if (mt76_queue_is_wed_rro_ind(q)) {
+		struct mt76_wed_rro_desc *rro_desc;
+		int i;
+
+		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+		for (i = 0; i < q->ndesc; i++) {
+			struct mt76_wed_rro_ind *cmd;
+
+			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
+		}
+	}
+
	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
@@ -724,8 +797,13 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
	if (ret)
		return ret;
 
-	if (q->flags != MT_WED_Q_TXFREE)
-		mt76_dma_queue_reset(dev, q);
+	if (mtk_wed_device_active(&dev->mmio.wed)) {
+		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
+		    mt76_queue_is_wed_tx_free(q))
+			return 0;
+	}
+
+	mt76_dma_queue_reset(dev, q);
 
	return 0;
 }
@@ -746,7 +824,8 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
		if (!buf)
			break;
 
-		mt76_put_page_pool_buf(buf, false);
+		if (!mt76_queue_is_wed_rro(q))
+			mt76_put_page_pool_buf(buf, false);
	} while (1);
 
	if (q->rx_head) {
@@ -761,19 +840,22 @@ static void
 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 {
	struct mt76_queue *q = &dev->q_rx[qid];
-	int i;
 
	if (!q->ndesc)
		return;
 
-	for (i = 0; i < q->ndesc; i++)
-		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+	if (!mt76_queue_is_wed_rro_ind(q)) {
+		int i;
+
+		for (i = 0; i < q->ndesc; i++)
+			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+	}
 
	mt76_dma_rx_cleanup(dev, q);
 
	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);
-	if (q->flags != MT_WED_Q_TXFREE) {
+	if (!mt76_queue_is_wed_tx_free(q)) {
		mt76_dma_sync_idx(dev, q);
		mt76_dma_rx_fill(dev, q, false);
	}
@@ -816,8 +898,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
	bool more;
 
	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
-	    q->flags == MT_WED_Q_TXFREE) {
-		dma_idx = Q_READ(dev, q, dma_idx);
+	    mt76_queue_is_wed_tx_free(q)) {
+		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}
 
@@ -827,7 +909,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 
		if (check_ddone) {
			if (q->tail == dma_idx)
-				dma_idx = Q_READ(dev, q, dma_idx);
+				dma_idx = Q_READ(q, dma_idx);
 
			if (q->tail == dma_idx)
				break;
@@ -979,16 +1061,23 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
 
+		if (mtk_wed_device_active(&dev->mmio.wed) &&
+		    mt76_queue_is_wed_rro(q))
+			continue;
+
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);
 
		page_pool_destroy(q->page_pool);
	}
 
-	mt76_free_pending_txwi(dev);
-	mt76_free_pending_rxwi(dev);
-
	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
+
+	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
+		mtk_wed_device_detach(&dev->mmio.wed_hif2);
+
+	mt76_free_pending_txwi(dev);
+	mt76_free_pending_rxwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/dma.h b/dma.h
index 1b090d7..22b79d5 100644
--- a/dma.h
+++ b/dma.h
@@ -25,6 +25,13 @@
 #define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
 #define MT_DMA_INFO_PPE_VLD		BIT(31)
 
+#define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
+#define MT_DMA_CTL_VER_MASK		BIT(7)
+
+#define MT_DMA_RRO_EN			BIT(13)
+
+#define MT_DMA_WED_IND_CMD_CNT		8
+
 #define MT_DMA_HDR_LEN			4
 #define MT_RX_INFO_LEN			4
 #define MT_FCE_INFO_LEN			4
@@ -37,6 +44,11 @@ struct mt76_desc {
	__le32 info;
 } __packed __aligned(4);
 
+struct mt76_wed_rro_desc {
+	__le32 buf0;
+	__le32 buf1;
+} __packed __aligned(4);
+
 enum mt76_qsel {
	MT_QSEL_MGMT,
	MT_QSEL_HCCA,
diff --git a/mac80211.c b/mac80211.c
index 12fcb2b..cd102dd 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -1726,7 +1726,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
 
 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
-		int ring_base, u32 flags)
+		int ring_base, void *wed, u32 flags)
 {
	struct mt76_queue *hwq;
	int err;
@@ -1736,6 +1736,7 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		return ERR_PTR(-ENOMEM);
 
	hwq->flags = flags;
+	hwq->wed = wed;
 
	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
@@ -1843,3 +1844,19 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
	return MT_DFS_STATE_ACTIVE;
 }
 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct net_device *netdev, enum tc_setup_type type,
+		      void *type_data)
+{
+	struct mt76_phy *phy = hw->priv;
+	struct mtk_wed_device *wed = &phy->dev->mmio.wed;
+
+	if (!mtk_wed_device_active(wed))
+		return -EOPNOTSUPP;
+
+	return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
+}
+EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
diff --git a/mmio.c b/mmio.c
index 86e3d2a..c346249 100644
--- a/mmio.c
+++ b/mmio.c
@@ -4,6 +4,7 @@
  */
 
 #include "mt76.h"
+#include "dma.h"
 #include "trace.h"
 
 static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
@@ -84,6 +85,102 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
 }
 EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+	int i;
+
+	for (i = 0; i < dev->rx_token_size; i++) {
+		struct mt76_txwi_cache *t;
+
+		t = mt76_rx_token_release(dev, i);
+		if (!t || !t->ptr)
+			continue;
+
+		mt76_put_page_pool_buf(t->ptr, false);
+		t->ptr = NULL;
+
+		mt76_put_rxwi(dev, t);
+	}
+
+	mt76_free_pending_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
+
+u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
+	struct mt76_txwi_cache *t = NULL;
+
+	for (i = 0; i < size; i++) {
+		enum dma_data_direction dir;
+		dma_addr_t addr;
+		u32 offset;
+		int token;
+		void *buf;
+
+		t = mt76_get_rxwi(dev);
+		if (!t)
+			goto unmap;
+
+		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+		if (!buf)
+			goto unmap;
+
+		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+		dir = page_pool_get_dma_dir(q->page_pool);
+		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+
+		desc->buf0 = cpu_to_le32(addr);
+		token = mt76_rx_token_consume(dev, buf, t, addr);
+		if (token < 0) {
+			mt76_put_page_pool_buf(buf, false);
+			goto unmap;
+		}
+
+		desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
+						      token));
+		desc++;
+	}
+
+	return 0;
+
+unmap:
+	if (t)
+		mt76_put_rxwi(dev, t);
+	mt76_mmio_wed_release_rx_buf(wed);
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
+
+int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+{
+	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+	spin_lock_bh(&dev->token_lock);
+	dev->token_size = wed->wlan.token_start;
+	spin_unlock_bh(&dev->token_lock);
+
+	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
+
+void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+{
+	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+	spin_lock_bh(&dev->token_lock);
+	dev->token_size = dev->drv->token_size;
+	spin_unlock_bh(&dev->token_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
 {
	static const struct mt76_bus_ops mt76_mmio_ops = {
diff --git a/mt76.h b/mt76.h
index a238216..7f93210 100644
--- a/mt76.h
+++ b/mt76.h
@@ -28,15 +28,22 @@
 #define MT76_TOKEN_FREE_THR	64
 
 #define MT_QFLAG_WED_RING	GENMASK(1, 0)
-#define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
-#define MT_QFLAG_WED		BIT(4)
+#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
+#define MT_QFLAG_WED		BIT(5)
+#define MT_QFLAG_WED_RRO	BIT(6)
+#define MT_QFLAG_WED_RRO_EN	BIT(7)
 
 #define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))
+
 #define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
 #define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
 #define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
+#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
+#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
 
 struct mt76_dev;
 struct mt76_phy;
@@ -58,6 +65,9 @@ enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
+	MT76_WED_RRO_Q_DATA,
+	MT76_WED_RRO_Q_MSDU_PG,
+	MT76_WED_RRO_Q_IND,
 };
 
 struct mt76_bus_ops {
@@ -106,6 +116,16 @@ enum mt76_rxq_id {
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
+	MT_RXQ_RRO_BAND0,
+	MT_RXQ_RRO_BAND1,
+	MT_RXQ_RRO_BAND2,
+	MT_RXQ_MSDU_PAGE_BAND0,
+	MT_RXQ_MSDU_PAGE_BAND1,
+	MT_RXQ_MSDU_PAGE_BAND2,
+	MT_RXQ_TXFREE_BAND0,
+	MT_RXQ_TXFREE_BAND1,
+	MT_RXQ_TXFREE_BAND2,
+	MT_RXQ_RRO_IND,
	__MT_RXQ_MAX
 };
 
@@ -183,6 +203,7 @@ struct mt76_queue {
	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
+	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;
 
	u16 first;
@@ -196,8 +217,9 @@ struct mt76_queue {
 
	u8 buf_offset;
	u8 hw_idx;
-	u8 flags;
+	u16 flags;
 
+	struct mtk_wed_device *wed;
	u32 wed_regs;
 
	dma_addr_t desc_dma;
@@ -352,6 +374,17 @@ struct mt76_txq {
	bool aggr;
 };
 
+struct mt76_wed_rro_ind {
+	u32 se_id	: 12;
+	u32 rsv		: 4;
+	u32 start_sn	: 12;
+	u32 ind_reason	: 4;
+	u32 ind_cnt	: 13;
+	u32 win_sz	: 3;
+	u32 rsv2	: 13;
+	u32 magic_cnt	: 3;
+};
+
 struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;
@@ -602,6 +635,7 @@ struct mt76_mmio {
	u32 irqmask;
 
	struct mtk_wed_device wed;
+	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
 };
@@ -1046,6 +1080,12 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
 void mt76_pci_disable_aspm(struct pci_dev *pdev);
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct net_device *netdev, enum tc_setup_type type,
+		      void *type_data);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
	return dev->rev >> 16;
@@ -1056,6 +1096,13 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
	return dev->rev & 0xffff;
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
+int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
+void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
 
@@ -1105,15 +1152,16 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
 
 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
-		int ring_base, u32 flags);
+		int ring_base, void *wed, u32 flags);
 u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx);
 static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
-				     int n_desc, int ring_base, u32 flags)
+				     int n_desc, int ring_base, void *wed,
+				     u32 flags)
 {
	struct mt76_queue *q;
 
-	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
+	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);
 
@@ -1127,7 +1175,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
 {
	struct mt76_queue *q;
 
-	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
+	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);
 
@@ -1541,10 +1589,38 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct mt76_power_limits *dest,
			      s8 target_power);
 
-static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
 {
	return (q->flags & MT_QFLAG_WED) &&
-	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
+
+static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_WED_RRO;
+}
+
+static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
+}
+
+static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
+		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+}
+
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+	if (!(q->flags & MT_QFLAG_WED))
+		return false;
+
+	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
+	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+
 }
 
 struct mt76_txwi_cache *
@@ -1584,10 +1660,14 @@ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 static inline int
 mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 {
-	int token;
+	int token, start = 0;
+
+	if (mtk_wed_device_active(&dev->mmio.wed))
+		start = dev->mmio.wed.wlan.nbuf;
 
	spin_lock_bh(&dev->token_lock);
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
+	token = idr_alloc(&dev->token, *ptxwi, start, start + dev->token_size,
+			  GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
 
	return token;
diff --git a/mt7603/dma.c b/mt7603/dma.c
index 03ba11a..7a2f5d3 100644
--- a/mt7603/dma.c
+++ b/mt7603/dma.c
@@ -173,13 +173,14 @@ int mt7603_dma_init(struct mt7603_dev *dev)
 
	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
-					 MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
+					 MT7603_TX_RING_SIZE, MT_TX_RING_BASE,
+					 NULL, 0);
		if (ret)
			return ret;
	}
 
	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
-				 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+				 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
@@ -189,12 +190,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
		return ret;
 
	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
-				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
-				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
diff --git a/mt7615/dma.c b/mt7615/dma.c
index 0ce01cc..e7135b2 100644
--- a/mt7615/dma.c
+++ b/mt7615/dma.c
@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
					 MT7615_TX_RING_SIZE / 2,
-					 MT_TX_RING_BASE, 0);
+					 MT_TX_RING_BASE, NULL, 0);
		if (ret)
			return ret;
	}
 
	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
				 MT7615_TX_MGMT_RING_SIZE,
-				 MT_TX_RING_BASE, 0);
+				 MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
		return mt7622_init_tx_queues_multi(dev);
 
	ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE,
-					 MT_TX_RING_BASE, 0);
+					 MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
diff --git a/mt76_connac.h b/mt76_connac.h
index 1f29d8c..e5ebde1 100644
--- a/mt76_connac.h
+++ b/mt76_connac.h
@@ -391,7 +391,8 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
 
 void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss);
 int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
-			       int ring_base, u32 flags);
+			       int ring_base, void *wed, u32 flags);
+
 void mt76_connac_write_hw_txp(struct mt76_dev *dev,
			      struct mt76_tx_info *tx_info,
			      void *txp_ptr, u32 id);
diff --git a/mt76_connac_mac.c b/mt76_connac_mac.c
index 93402d2..c791464 100644
--- a/mt76_connac_mac.c
+++ b/mt76_connac_mac.c
@@ -256,11 +256,12 @@ void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
 EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap);
 
 int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
-			       int ring_base, u32 flags)
+			       int ring_base, void *wed, u32 flags)
 {
	int i, err;
 
-	err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base, flags);
+	err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base,
+				 wed, flags);
	if (err < 0)
		return err;
 
diff --git a/mt76x02_mmio.c b/mt76x02_mmio.c
index 9b5e3fb..e5ad635 100644
--- a/mt76x02_mmio.c
+++ b/mt76x02_mmio.c
@@ -199,13 +199,14 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
					 MT76x02_TX_RING_SIZE,
-					 MT_TX_RING_BASE, 0);
+					 MT_TX_RING_BASE, NULL, 0);
		if (ret)
			return ret;
	}
 
	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
-				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE,
+				 NULL, 0);
	if (ret)
		return ret;
 
diff --git a/mt7915/dma.c b/mt7915/dma.c
index 59a44d7..1bceeb5 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -9,18 +9,20 @@ static int
 mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
 {
	struct mt7915_dev *dev = phy->dev;
+	struct mtk_wed_device *wed = NULL;
 
-	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		if (is_mt798x(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;
 
		idx -= MT_TXQ_ID(0);
+		wed = &dev->mt76.mmio.wed;
	}
 
	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
-					  MT_WED_Q_TX(idx));
+					  wed, MT_WED_Q_TX(idx));
 }
 
 static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -492,7 +494,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
-		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+		mdev->q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+		mdev->q_rx[MT_RXQ_MCU_WA].wed = &mdev->mmio.wed;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
@@ -507,9 +510,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
-			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+			mdev->q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+			mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
		}
 
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -528,6 +532,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
 
	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+		mdev->q_rx[MT_RXQ_MAIN_WA].wed = &mdev->mmio.wed;
		if (is_mt7916(mdev)) {
			wa_rx_base = MT_WED_RX_RING_BASE;
			wa_rx_idx = MT7915_RXQ_MCU_WA;
@@ -544,9 +549,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
-			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+			mdev->q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+			mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
		}
 
		/* rx data queue for band1 */
@@ -643,7 +649,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
 
	mt76_for_each_q_rx(&dev->mt76, i) {
-		if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
+		if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
			continue;
 
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
diff --git a/mt7915/main.c b/mt7915/main.c
index a3fd54c..ba34c8e 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -1653,20 +1653,6 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
 
	return 0;
 }
-
-static int
-mt7915_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		    struct net_device *netdev, enum tc_setup_type type,
-		    void *type_data)
-{
-	struct mt7915_dev *dev = mt7915_hw_dev(hw);
-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
-
-	if (!mtk_wed_device_active(wed))
-		return -EOPNOTSUPP;
-
-	return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
-}
 #endif
 
 const struct ieee80211_ops mt7915_ops = {
@@ -1721,6 +1707,6 @@ const struct ieee80211_ops mt7915_ops = {
	.set_radar_background = mt7915_set_radar_background,
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
	.net_fill_forward_path = mt7915_net_fill_forward_path,
-	.net_setup_tc = mt7915_net_setup_tc,
+	.net_setup_tc = mt76_net_setup_tc,
 #endif
 };
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index fc7ace6..85cb3fe 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -542,105 +542,6 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
 }
 
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
-static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
-{
-	struct mt7915_dev *dev;
-
-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
-	spin_lock_bh(&dev->mt76.token_lock);
-	dev->mt76.token_size = wed->wlan.token_start;
-	spin_unlock_bh(&dev->mt76.token_lock);
-
-	return !wait_event_timeout(dev->mt76.tx_wait,
-				   !dev->mt76.wed_token_count, HZ);
-}
-
-static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
-{
-	struct mt7915_dev *dev;
-
-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
-	spin_lock_bh(&dev->mt76.token_lock);
-	dev->mt76.token_size = MT7915_TOKEN_SIZE;
-	spin_unlock_bh(&dev->mt76.token_lock);
-}
-
-static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
-{
-	struct mt7915_dev *dev;
-	int i;
-
-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	for (i = 0; i < dev->mt76.rx_token_size; i++) {
-		struct mt76_txwi_cache *t;
-
-		t = mt76_rx_token_release(&dev->mt76, i);
-		if (!t || !t->ptr)
-			continue;
-
-		mt76_put_page_pool_buf(t->ptr, false);
-		t->ptr = NULL;
-
-		mt76_put_rxwi(&dev->mt76, t);
-	}
-
-	mt76_free_pending_rxwi(&dev->mt76);
-}
-
-static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-{
-	struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
-	struct mt76_txwi_cache *t = NULL;
-	struct mt7915_dev *dev;
-	struct mt76_queue *q;
-	int i, len;
-
-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
-	len = SKB_WITH_OVERHEAD(q->buf_size);
-
-	for (i = 0; i < size; i++) {
-		enum dma_data_direction dir;
-		dma_addr_t addr;
-		u32 offset;
-		int token;
-		void *buf;
-
-		t = mt76_get_rxwi(&dev->mt76);
-		if (!t)
-			goto unmap;
-
-		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
-		if (!buf)
-			goto unmap;
-
-		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
-		dir = page_pool_get_dma_dir(q->page_pool);
-		dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
-
-		desc->buf0 = cpu_to_le32(addr);
-		token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
-		if (token < 0) {
-			mt76_put_page_pool_buf(buf, false);
-			goto unmap;
-		}
-
-		desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
-						      token));
-		desc++;
-	}
-
-	return 0;
-
-unmap:
-	if (t)
-		mt76_put_rxwi(&dev->mt76, t);
-	mt7915_mmio_wed_release_rx_buf(wed);
-	return -ENOMEM;
-}
-
 static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
					     struct mtk_wed_wo_rx_stats *stats)
 {
@@ -778,10 +679,10 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
	}
 
	wed->wlan.init_buf = mt7915_wed_init_buf;
-	wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
-	wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
-	wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
-	wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
+	wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
+	wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+	wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
+	wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
	wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
	wed->wlan.reset = mt7915_mmio_wed_reset;
	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
diff --git a/mt7921/pci.c b/mt7921/pci.c
index 9647e4b..9ea7e0c 100644
--- a/mt7921/pci.c
+++ b/mt7921/pci.c
@@ -171,7 +171,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
	/* init tx queue */
	ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
					 MT7921_TX_RING_SIZE,
-					 MT_TX_RING_BASE, 0);
+					 MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
diff --git a/mt7925/pci.c b/mt7925/pci.c
index 08ef75e..734f31e 100644
--- a/mt7925/pci.c
+++ b/mt7925/pci.c
@@ -218,7 +218,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev)
	/* init tx queue */
	ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0,
					 MT7925_TX_RING_SIZE,
-					 MT_TX_RING_BASE, 0);
+					 MT_TX_RING_BASE, NULL, 0);
	if (ret)
		return ret;
 
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 586e247..2221d22 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -7,6 +7,26 @@
 #include "../dma.h"
 #include "mac.h"
 
+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
+			  int ring_base, struct mtk_wed_device *wed)
+{
+	struct mt7996_dev *dev = phy->dev;
+	u32 flags = 0;
+
+	if (mtk_wed_device_active(wed)) {
+		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+		idx -= MT_TXQ_ID(0);
+
+		if (phy->mt76->band_idx == MT_BAND2)
+			flags = MT_WED_Q_TX(0);
+		else
+			flags = MT_WED_Q_TX(idx);
+	}
+
+	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
+					  ring_base, wed, flags);
+}
+
 static int mt7996_poll_tx(struct napi_struct *napi, int budget)
 {
	struct mt7996_dev *dev;
@@ -45,6 +65,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
	RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
	RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
 
+	if (dev->has_rro) {
+		/* band0 */
+		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
+			   MT7996_RXQ_RRO_BAND0);
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
+			   MT7996_RXQ_MSDU_PG_BAND0);
+		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
+			   MT7996_RXQ_TXFREE0);
+		/* band1 */
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
+			   MT7996_RXQ_MSDU_PG_BAND1);
+		/* band2 */
+		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
+			   MT7996_RXQ_RRO_BAND2);
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
+			   MT7996_RXQ_MSDU_PG_BAND2);
+		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
+			   MT7996_RXQ_TXFREE2);
+
+		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
+			   MT7996_RXQ_RRO_IND);
+	}
+
	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
@@ -73,6 +116,24 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));
 
+	if (dev->has_rro) {
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
+			PREFETCH(0x3a0, 0x10));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
+			PREFETCH(0x4a0, 0x10));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
+			PREFETCH(0x5a0, 0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+			PREFETCH(0x5e0, 0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+			PREFETCH(0x620, 0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
+			PREFETCH(0x660, 0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
+			PREFETCH(0x6a0, 0x4));
+	}
+#undef PREFETCH
+
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
 }
 
@@ -128,8 +189,9 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
	}
 }
 
-void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
 {
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;
 
@@ -138,11 +200,16 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
 
	/* enable WFDMA Tx/Rx */
	if (!reset) {
-		mt76_set(dev, MT_WFDMA0_GLO_CFG,
-			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
-			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
-			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
-			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+			mt76_set(dev, MT_WFDMA0_GLO_CFG,
+				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO);
+		else
+			mt76_set(dev, MT_WFDMA0_GLO_CFG,
+				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
 
		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -153,11 +220,7 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
	}
 
	/* enable interrupts for TX/RX rings */
-	irq_mask = MT_INT_MCU_CMD;
-	if (reset)
-		goto done;
-
-	irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
+	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
 
	if (!dev->mphy.band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;
@@ -168,7 +231,16 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
	if (dev->tbtc_support)
		irq_mask |= MT_INT_BAND2_RX_DONE;
 
-done:
+	if (mtk_wed_device_active(wed) && wed_reset) {
+		u32 wed_irq_mask = irq_mask;
+
+		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+		mtk_wed_device_start(wed, wed_irq_mask);
+	}
+
+	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
+
	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
 }
@@ -241,17 +313,90 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
		/* fix hardware limitation, pcie1's rx ring3 is not available
		 * so, redirect pcie0 rx ring3 interrupt to pcie1
		 */
-		mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
-			 MT_WFDMA0_RX_INT_SEL_RING3);
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+		    dev->has_rro)
+			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
+				 MT_WFDMA0_RX_INT_SEL_RING6);
+		else
+			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+				 MT_WFDMA0_RX_INT_SEL_RING3);
+	}
+
+	mt7996_dma_start(dev, reset, true);
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt7996_dma_rro_init(struct mt7996_dev *dev)
+{
+	struct mt76_dev *mdev = &dev->mt76;
+	u32 irq_mask;
+	int ret;
+
+	/* ind cmd */
+	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
+	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
+			       MT_RXQ_ID(MT_RXQ_RRO_IND),
+			       MT7996_RX_RING_SIZE,
+			       0, MT_RXQ_RRO_IND_RING_BASE);
+	if (ret)
+		return ret;
 
-		/* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+	/* rx msdu page queue for band0 */
+	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
+		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
+	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+			       MT7996_RX_RING_SIZE,
+			       MT7996_RX_MSDU_PAGE_SIZE,
+			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
+	if (ret)
+		return ret;
+
+	if (dev->dbdc_support) {
+		/* rx msdu page queue for band1 */
+		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
+			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
+		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_MSDU_PAGE_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
+		if (ret)
+			return ret;
+	}
+
+	if (dev->tbtc_support) {
+		/* rx msdu page queue for band2 */
+		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
+			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
+		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_MSDU_PAGE_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
+		if (ret)
+			return ret;
	}
 
-	mt7996_dma_start(dev, reset);
+	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
+		   MT_INT_TX_DONE_BAND2;
+	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+	mt7996_irq_enable(dev, irq_mask);
+
+	return 0;
 }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
 
 int mt7996_dma_init(struct mt7996_dev *dev)
 {
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
+	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;
 
@@ -265,10 +410,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
	mt7996_dma_disable(dev, true);
 
	/* init tx queue */
-	ret = mt76_connac_init_tx_queues(dev->phy.mt76,
-					 MT_TXQ_ID(dev->mphy.band_idx),
-					 MT7996_TX_RING_SIZE,
-					 MT_TXQ_RING_BASE(0), 0);
+	ret = mt7996_init_tx_queues(&dev->phy,
+				    MT_TXQ_ID(dev->mphy.band_idx),
+				    MT7996_TX_RING_SIZE,
+				    MT_TXQ_RING_BASE(0),
+				    wed);
	if (ret)
		return ret;
 
@@ -315,6 +461,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
		return ret;
 
	/* rx data queue for band0 and band1 */
+	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
+		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
+	}
+
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
@@ -324,6 +475,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
		return ret;
 
	/* tx free notify event from WA for band0 */
+	if (mtk_wed_device_active(wed) && !dev->has_rro) {
+		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
+	}
+
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
@@ -334,17 +490,23 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 
	if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
		/* rx data queue for band2 */
+		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
-				       MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+				       rx_base);
		if (ret)
			return ret;
 
		/* tx free notify event from WA for band2
		 * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
		 */
+		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
+			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
+		}
+
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
@@ -354,6 +516,60 @@ int mt7996_dma_init(struct mt7996_dev *dev)
			return ret;
	}
 
+	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+	    dev->has_rro) {
+		/* rx rro data queue for band0 */
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
+			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_BUF_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
+		if (ret)
+			return ret;
+
+		/* tx free notify event from WA for band0 */
+		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+				       MT7996_RX_MCU_RING_SIZE,
+				       MT7996_RX_BUF_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+		if (ret)
+			return ret;
+
+		if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
+			/* rx rro data queue for band2 */
+			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
+				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+					       MT7996_RX_RING_SIZE,
+					       MT7996_RX_BUF_SIZE,
+					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
+			if (ret)
+				return ret;
+
+			/* tx free notify event from MAC for band2 */
+			if (mtk_wed_device_active(wed_hif2)) {
+				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
+				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
+			}
+			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
+					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
+					       MT7996_RX_MCU_RING_SIZE,
+					       MT7996_RX_BUF_SIZE,
+					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
+			if (ret)
+				return ret;
+		}
+	}
+
	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;
1659diff --git a/mt7996/init.c b/mt7996/init.c
1660index 12c2513..d335b58 100644
1661--- a/mt7996/init.c
1662+++ b/mt7996/init.c
1663@@ -155,7 +155,7 @@ mt7996_regd_notifier(struct wiphy *wiphy,
1664 }
1665
1666 static void
1667-mt7996_init_wiphy(struct ieee80211_hw *hw)
1668+mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
1669 {
1670 struct mt7996_phy *phy = mt7996_hw_phy(hw);
1671 struct mt76_dev *mdev = &phy->dev->mt76;
1672@@ -167,6 +167,8 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
1673 hw->max_rx_aggregation_subframes = max_subframes;
1674 hw->max_tx_aggregation_subframes = max_subframes;
1675 hw->netdev_features = NETIF_F_RXCSUM;
1676+ if (mtk_wed_device_active(wed))
1677+ hw->netdev_features |= NETIF_F_HW_TC;
1678
1679 hw->radiotap_timestamp.units_pos =
1680 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
1681@@ -312,8 +314,13 @@ void mt7996_mac_init(struct mt7996_dev *dev)
1682
1683 /* rro module init */
1684 mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
1685- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
1686- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
1687+ if (dev->has_rro) {
1688+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
1689+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
1690+ } else {
1691+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
1692+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
1693+ }
1694
1695 mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
1696 MCU_WA_PARAM_HW_PATH_HIF_VER,
1697@@ -350,6 +357,7 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
1698 struct mt76_phy *mphy;
1699 u32 mac_ofs, hif1_ofs = 0;
1700 int ret;
1701+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
1702
1703 if (band != MT_BAND1 && band != MT_BAND2)
1704 return 0;
1705@@ -361,8 +369,10 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
1706 if (phy)
1707 return 0;
1708
1709- if (band == MT_BAND2 && dev->hif2)
1710+ if (band == MT_BAND2 && dev->hif2) {
1711 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
1712+ wed = &dev->mt76.mmio.wed_hif2;
1713+ }
1714
1715 mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
1716 if (!mphy)
1717@@ -395,11 +405,12 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
1718 mt76_eeprom_override(mphy);
1719
1720 /* init wiphy according to mphy and phy */
1721- mt7996_init_wiphy(mphy->hw);
1722- ret = mt76_connac_init_tx_queues(phy->mt76,
1723- MT_TXQ_ID(band),
1724- MT7996_TX_RING_SIZE,
1725- MT_TXQ_RING_BASE(band) + hif1_ofs, 0);
1726+ mt7996_init_wiphy(mphy->hw, wed);
1727+ ret = mt7996_init_tx_queues(mphy->priv,
1728+ MT_TXQ_ID(band),
1729+ MT7996_TX_RING_SIZE,
1730+ MT_TXQ_RING_BASE(band) + hif1_ofs,
1731+ wed);
1732 if (ret)
1733 goto error;
1734
1735@@ -412,6 +423,13 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
1736 if (ret)
1737 goto error;
1738
1739+ if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
1740+ u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
1741+
1742+ mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
1743+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, irq_mask);
1744+ }
1745+
1746 return 0;
1747
1748 error:
1749@@ -456,6 +474,120 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
1750 msleep(20);
1751 }
1752
1753+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
1754+{
1755+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
1756+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
1757+ u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
1758+ struct mt7996_wed_rro_addr *addr;
1759+ void *ptr;
1760+ int i;
1761+
1762+ if (!dev->has_rro)
1763+ return 0;
1764+
1765+ if (!mtk_wed_device_active(wed))
1766+ return 0;
1767+
1768+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
1769+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
1770+ MT7996_RRO_BA_BITMAP_CR_SIZE,
1771+ &dev->wed_rro.ba_bitmap[i].phy_addr,
1772+ GFP_KERNEL);
1773+ if (!ptr)
1774+ return -ENOMEM;
1775+
1776+ dev->wed_rro.ba_bitmap[i].ptr = ptr;
1777+ }
1778+
1779+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
1780+ int j;
1781+
1782+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
1783+ MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr),
1784+ &dev->wed_rro.addr_elem[i].phy_addr,
1785+ GFP_KERNEL);
1786+ if (!ptr)
1787+ return -ENOMEM;
1788+
1789+ dev->wed_rro.addr_elem[i].ptr = ptr;
1790+ memset(dev->wed_rro.addr_elem[i].ptr, 0,
1791+ MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr));
1792+
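+ /* a signature of 0xff marks an address element as unused */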
1793+ addr = dev->wed_rro.addr_elem[i].ptr;
1794+ for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
1795+ addr->signature = 0xff;
1796+ addr++;
1797+ }
1798+
1799+ wed->wlan.ind_cmd.addr_elem_phys[i] =
1800+ dev->wed_rro.addr_elem[i].phy_addr;
1801+ }
1802+
1803+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
1804+ MT7996_RRO_WINDOW_MAX_LEN * sizeof(*addr),
1805+ &dev->wed_rro.session.phy_addr,
1806+ GFP_KERNEL);
1807+ if (!ptr)
1808+ return -ENOMEM;
1809+
1810+ dev->wed_rro.session.ptr = ptr;
1811+ addr = dev->wed_rro.session.ptr;
1812+ for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
1813+ addr->signature = 0xff;
1814+ addr++;
1815+ }
1816+
1817+ /* rro hw init */
1818+ /* TODO: remove this line after the WM firmware has set it */
1819+ mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
1820+
1821+ /* setup BA bitmap cache address */
1822+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
1823+ dev->wed_rro.ba_bitmap[0].phy_addr);
1824+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
1825+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
1826+ dev->wed_rro.ba_bitmap[1].phy_addr);
1827+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
1828+
1829+ /* setup Address element address */
1830+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
1831+ mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
1832+ reg += 4;
1833+ }
1834+
1835+ /* setup Address element address - separate address segment mode */
1836+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
1837+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
1838+
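+ /* win_size encodes the BA window length as a power of two: len = 2^(win_size + 5), so 1024 -> 5 */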
1839+ wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
1840+ wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
1841+ wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
1842+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
1843+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
1844+
1845+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
1846+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
1847+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
1848+
1849+ /* particular session configuration */
1850+ /* use max session idx + 1 as particular session id */
1851+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
1852+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
1853+ MT_RRO_PARTICULAR_CONFG_EN |
1854+ FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
1855+
1856+ /* interrupt enable */
1857+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
1858+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
1859+
1860+ /* rro ind cmd queue init */
1861+ return mt7996_dma_rro_init(dev);
1862+#else
1863+ return 0;
1864+#endif
1865+}
1866+
1867 static int mt7996_init_hardware(struct mt7996_dev *dev)
1868 {
1869 int ret, idx;
1870@@ -477,6 +609,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
1871 if (ret)
1872 return ret;
1873
1874+ ret = mt7996_wed_rro_init(dev);
1875+ if (ret)
1876+ return ret;
1877+
1878 ret = mt7996_eeprom_init(dev);
1879 if (ret < 0)
1880 return ret;
1881@@ -884,7 +1020,7 @@ int mt7996_register_device(struct mt7996_dev *dev)
1882 if (ret)
1883 return ret;
1884
1885- mt7996_init_wiphy(hw);
1886+ mt7996_init_wiphy(hw, &dev->mt76.mmio.wed);
1887
1888 ret = mt76_register_device(&dev->mt76, true, mt76_rates,
1889 ARRAY_SIZE(mt76_rates));
1890diff --git a/mt7996/mac.c b/mt7996/mac.c
1891index 1a1e218..4be5410 100644
1892--- a/mt7996/mac.c
1893+++ b/mt7996/mac.c
1894@@ -449,8 +449,36 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
1895 return 0;
1896 }
1897
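+/* rx frames carrying a valid PPE hint are handed back to WED so the
+ * matching flow-table entry can be checked and offloaded
+ */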
1898+static void
1899+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
1900+ struct mt7996_sta *msta, struct sk_buff *skb,
1901+ u32 info)
1902+{
1903+ struct ieee80211_vif *vif;
1904+ struct wireless_dev *wdev;
1905+
1906+ if (!msta || !msta->vif)
1907+ return;
1908+
1909+ if (!mt76_queue_is_wed_rx(q))
1910+ return;
1911+
1912+ if (!(info & MT_DMA_INFO_PPE_VLD))
1913+ return;
1914+
1915+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
1916+ drv_priv);
1917+ wdev = ieee80211_vif_to_wdev(vif);
1918+ skb->dev = wdev->netdev;
1919+
1920+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
1921+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
1922+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
1923+}
1924+
1925 static int
1926-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
1927+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
1928+ struct sk_buff *skb, u32 *info)
1929 {
1930 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1931 struct mt76_phy *mphy = &dev->mt76.phy;
1932@@ -475,7 +503,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
1933 u16 seq_ctrl = 0;
1934 __le16 fc = 0;
1935 int idx;
1936+ bool hw_aggr = status->aggr;
1937+ struct mt7996_sta *msta = NULL;
1938
1940 memset(status, 0, sizeof(*status));
1941
1942 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
1943@@ -502,8 +533,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
1944 status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
1945
1946 if (status->wcid) {
1947- struct mt7996_sta *msta;
1948-
1949 msta = container_of(status->wcid, struct mt7996_sta, wcid);
1950 spin_lock_bh(&dev->mt76.sta_poll_lock);
1951 if (list_empty(&msta->wcid.poll_list))
1952@@ -708,12 +737,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
1953 }
1954 } else {
1955 status->flag |= RX_FLAG_8023;
1956+ mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
1957+ *info);
1958 }
1959
1960 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
1961 mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
1962
1963- if (!status->wcid || !ieee80211_is_data_qos(fc))
1964+ if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
1965 return 0;
1966
1967 status->aggr = unicast &&
1968@@ -1010,6 +1041,29 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
1969 return 0;
1970 }
1971
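+/* pre-fill the TXD and CT-mode TXP of a WED tx buffer: 802.3 frame,
+ * single fragment, payload placed right after the two descriptors
+ */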
1972+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
1973+{
1974+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
1975+ __le32 *txwi = ptr;
1976+ u32 val;
1977+
1978+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
1979+
1980+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
1981+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
1982+ txwi[0] = cpu_to_le32(val);
1983+
1984+ val = BIT(31) |
1985+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
1986+ txwi[1] = cpu_to_le32(val);
1987+
1988+ txp->token = cpu_to_le16(token_id);
1989+ txp->nbuf = 1;
1990+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
1991+
1992+ return MT_TXD_SIZE + sizeof(*txp);
1993+}
1994+
1995 static void
1996 mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1997 {
1998@@ -1388,6 +1442,12 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1999
2000 switch (type) {
2001 case PKT_TYPE_TXRX_NOTIFY:
2002+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
2003+ q == MT_RXQ_TXFREE_BAND2) {
2004+ dev_kfree_skb(skb);
2005+ break;
2006+ }
2007+
2008 mt7996_mac_tx_free(dev, skb->data, skb->len);
2009 napi_consume_skb(skb, 1);
2010 break;
2011@@ -1404,7 +1464,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
2012 dev_kfree_skb(skb);
2013 break;
2014 case PKT_TYPE_NORMAL:
2015- if (!mt7996_mac_fill_rx(dev, skb)) {
2016+ if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
2017 mt76_rx(&dev->mt76, q, skb);
2018 return;
2019 }
2020@@ -1862,7 +1922,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
2021 mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
2022
2023 /* enable DMA Tx/Rx and interrupt */
2024- mt7996_dma_start(dev, false);
2025+ mt7996_dma_start(dev, false, false);
2026
2027 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
2028 clear_bit(MT76_RESET, &dev->mphy.state);
2029diff --git a/mt7996/main.c b/mt7996/main.c
2030index a2ab668..ae4f0ce 100644
2031--- a/mt7996/main.c
2032+++ b/mt7996/main.c
2033@@ -1368,6 +1368,44 @@ out:
2034 return ret;
2035 }
2036
2037+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
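+/* describe the WDMA forward path for this station so the MTK PPE can
+ * offload matching flows directly to the WLAN hardware
+ */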
2038+static int
2039+mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
2040+ struct ieee80211_vif *vif,
2041+ struct ieee80211_sta *sta,
2042+ struct net_device_path_ctx *ctx,
2043+ struct net_device_path *path)
2044+{
2045+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
2046+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
2047+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
2048+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
2049+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
2050+
2051+ if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
2052+ wed = &dev->mt76.mmio.wed_hif2;
2053+
2054+ if (!mtk_wed_device_active(wed))
2055+ return -ENODEV;
2056+
2057+ if (msta->wcid.idx > MT7996_WTBL_STA)
2058+ return -EIO;
2059+
2060+ path->type = DEV_PATH_MTK_WDMA;
2061+ path->dev = ctx->dev;
2062+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
2063+ path->mtk_wdma.bss = mvif->mt76.idx;
2064+ path->mtk_wdma.queue = 0;
2065+ path->mtk_wdma.wcid = msta->wcid.idx;
2066+
2067+ path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
2068+ ctx->dev = NULL;
2069+
2070+ return 0;
2071+}
2072+
2073+#endif
2074+
2075 const struct ieee80211_ops mt7996_ops = {
2076 .tx = mt7996_tx,
2077 .start = mt7996_start,
2078@@ -1412,4 +1450,8 @@ const struct ieee80211_ops mt7996_ops = {
2079 .sta_add_debugfs = mt7996_sta_add_debugfs,
2080 #endif
2081 .set_radar_background = mt7996_set_radar_background,
2082+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2083+ .net_fill_forward_path = mt7996_net_fill_forward_path,
2084+ .net_setup_tc = mt76_net_setup_tc,
2085+#endif
2086 };
2087diff --git a/mt7996/mcu.c b/mt7996/mcu.c
2088index 12bf4e5..3ff70c6 100644
2089--- a/mt7996/mcu.c
2090+++ b/mt7996/mcu.c
2091@@ -912,7 +912,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
2092 }
2093
2094 static int
2095-mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
2096+mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif *mvif,
2097 struct ieee80211_ampdu_params *params,
2098 bool enable, bool tx)
2099 {
2100@@ -921,7 +921,7 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
2101 struct sk_buff *skb;
2102 struct tlv *tlv;
2103
2104- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
2105+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mvif, wcid,
2106 MT7996_STA_UPDATE_MAX_SIZE);
2107 if (IS_ERR(skb))
2108 return PTR_ERR(skb);
2109@@ -935,8 +935,9 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
2110 ba->ba_en = enable << params->tid;
2111 ba->amsdu = params->amsdu;
2112 ba->tid = params->tid;
2113+ ba->ba_rdd_rro = !tx && enable && dev->has_rro;
2114
2115- return mt76_mcu_skb_send_msg(dev, skb,
2116+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
2117 MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
2118 }
2119
2120@@ -951,8 +952,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
2121 if (enable && !params->amsdu)
2122 msta->wcid.amsdu = false;
2123
2124- return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
2125- enable, true);
2126+ return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, true);
2127 }
2128
2129 int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
2130@@ -962,8 +962,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
2131 struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
2132 struct mt7996_vif *mvif = msta->vif;
2133
2134- return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
2135- enable, false);
2136+ return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, false);
2137 }
2138
2139 static void
2140diff --git a/mt7996/mmio.c b/mt7996/mmio.c
2141index 3a591a7..c7b6d4b 100644
2142--- a/mt7996/mmio.c
2143+++ b/mt7996/mmio.c
2144@@ -10,6 +10,10 @@
2145 #include "mt7996.h"
2146 #include "mac.h"
2147 #include "../trace.h"
2148+#include "../dma.h"
2149+
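+/* opt-in Wireless Ethernet Dispatch (WED) offload; disabled by default */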
2150+static bool wed_enable;
2151+module_param(wed_enable, bool, 0644);
2152
2153 static const struct __base mt7996_reg_base[] = {
2154 [WF_AGG_BASE] = { { 0x820e2000, 0x820f2000, 0x830e2000 } },
2155@@ -191,6 +195,143 @@ static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
2156 return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
2157 }
2158
2159+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
2160+ bool hif2, int *irq)
2161+{
2162+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2163+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
2164+ struct pci_dev *pci_dev = pdev_ptr;
2165+ u32 hif1_ofs = 0;
2166+ int ret;
2167+
2168+ if (!wed_enable)
2169+ return 0;
2170+
2171+ dev->has_rro = true;
2172+
2173+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
2174+
2175+ if (hif2)
2176+ wed = &dev->mt76.mmio.wed_hif2;
2177+
2178+ wed->wlan.pci_dev = pci_dev;
2179+ wed->wlan.bus_type = MTK_WED_BUS_PCIE;
2180+
2181+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
2182+ pci_resource_start(pci_dev, 0),
2183+ pci_resource_len(pci_dev, 0));
2184+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
2185+
2186+ if (hif2) {
2187+ wed->wlan.wpdma_int = wed->wlan.phy_base +
2188+ MT_INT_PCIE1_SOURCE_CSR_EXT;
2189+ wed->wlan.wpdma_mask = wed->wlan.phy_base +
2190+ MT_INT_PCIE1_MASK_CSR;
2191+ wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
2192+ MT_TXQ_RING_BASE(0) +
2193+ MT7996_TXQ_BAND2 * MT_RING_SIZE;
2194+ if (dev->has_rro) {
2195+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
2196+ MT_RXQ_RING_BASE(0) +
2197+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
2198+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
2199+ } else {
2200+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
2201+ MT_RXQ_RING_BASE(0) +
2202+ MT7996_RXQ_MCU_WA_TRI * MT_RING_SIZE;
2203+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
2204+ }
2205+
2206+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
2207+ wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
2208+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
2209+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
2210+
2211+ wed->wlan.id = 0x7991;
2212+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
2213+ } else {
2214+ wed->wlan.hw_rro = dev->has_rro; /* default on */
2215+ wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
2216+ wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
2217+ wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
2218+ MT7996_TXQ_BAND0 * MT_RING_SIZE;
2219+
2220+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
2221+
2222+ wed->wlan.wpdma_rx = wed->wlan.phy_base +
2223+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
2224+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
2225+
2226+ wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
2227+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
2228+ MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
2229+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
2230+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
2231+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
2232+ wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
2233+ MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
2234+ MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
2235+
2236+ wed->wlan.rx_nbuf = 65536;
2237+ wed->wlan.rx_npkt = dev->hif2 ? 32768 : 24576;
2238+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
2239+
2240+ wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
2241+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
2242+
2243+ wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
2244+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
2245+
2246+ wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
2247+ wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
2248+ wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1;
2249+
2250+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
2251+ wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
2252+ if (dev->has_rro) {
2253+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
2254+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
2255+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
2256+ } else {
2257+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
2258+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
2259+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
2260+ }
2261+ dev->mt76.rx_token_size = MT7996_TOKEN_SIZE + wed->wlan.rx_npkt;
2262+ }
2263+
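+ /* reserve the upper 8192 of the 16384 tx tokens for WED-managed buffers */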
2264+ wed->wlan.nbuf = MT7996_HW_TOKEN_SIZE;
2265+ wed->wlan.token_start = MT7996_TOKEN_SIZE - wed->wlan.nbuf;
2266+
2267+ wed->wlan.amsdu_max_subframes = 8;
2268+ wed->wlan.amsdu_max_len = 1536;
2269+
2270+ wed->wlan.init_buf = mt7996_wed_init_buf;
2271+ wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
2272+ wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
2273+ wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
2274+ wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
2275+
2276+ if (mtk_wed_device_attach(wed))
2277+ return 0;
2278+
2279+ *irq = wed->irq;
2280+ dev->mt76.dma_dev = wed->dev;
2281+
2282+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
2283+ if (ret)
2284+ return ret;
2285+
2286+ ret = dma_set_coherent_mask(wed->dev, DMA_BIT_MASK(32));
2287+ if (ret)
2288+ return ret;
2289+
2290+ return 1;
2291+#else
2292+ return 0;
2293+#endif
2294+}
2295+
2296 static int mt7996_mmio_init(struct mt76_dev *mdev,
2297 void __iomem *mem_base,
2298 u32 device_id)
2299@@ -241,8 +382,17 @@ void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
2300 mdev->mmio.irqmask |= set;
2301
2302 if (write_reg) {
2303- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
2304- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
2305+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
2306+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
2307+ mdev->mmio.irqmask);
2308+ if (mtk_wed_device_active(&mdev->mmio.wed_hif2)) {
2309+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed_hif2,
2310+ mdev->mmio.irqmask);
2311+ }
2312+ } else {
2313+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
2314+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
2315+ }
2316 }
2317
2318 spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
2319@@ -260,22 +410,36 @@ static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
2320 static void mt7996_irq_tasklet(struct tasklet_struct *t)
2321 {
2322 struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
2323+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
2324+ struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
2325 u32 i, intr, mask, intr1 = 0;
2326
2327- mt76_wr(dev, MT_INT_MASK_CSR, 0);
2328- if (dev->hif2)
2329- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
2330-
2331- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
2332- intr &= dev->mt76.mmio.irqmask;
2333- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
2334-
2335- if (dev->hif2) {
2336- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
2337- intr1 &= dev->mt76.mmio.irqmask;
2338- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
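+ /* an active WED device proxies the hif interrupt CSRs, so irq status is read and acked through the WED API */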
2339+ if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
2340+ mtk_wed_device_irq_set_mask(wed_hif2, 0);
2341+ intr1 = mtk_wed_device_irq_get(wed_hif2,
2342+ dev->mt76.mmio.irqmask);
2343+ if (intr1 & MT_INT_RX_TXFREE_EXT)
2344+ napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
2345+ }
2346
2347- intr |= intr1;
2348+ if (mtk_wed_device_active(wed)) {
2349+ mtk_wed_device_irq_set_mask(wed, 0);
2350+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
2351+ intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
2352+ } else {
2353+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
2354+ if (dev->hif2)
2355+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
2356+
2357+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
2358+ intr &= dev->mt76.mmio.irqmask;
2359+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
2360+ if (dev->hif2) {
2361+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
2362+ intr1 &= dev->mt76.mmio.irqmask;
2363+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
2364+ intr |= intr1;
2365+ }
2366 }
2367
2368 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
2369@@ -308,9 +472,17 @@ irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
2370 {
2371 struct mt7996_dev *dev = dev_instance;
2372
2373- mt76_wr(dev, MT_INT_MASK_CSR, 0);
2374- if (dev->hif2)
2375- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
2376+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
2377+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0);
2378+ else
2379+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
2380+
2381+ if (dev->hif2) {
2382+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
2383+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed_hif2, 0);
2384+ else
2385+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
2386+ }
2387
2388 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
2389 return IRQ_NONE;
2390diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
2391index 7354e5c..c541eaa 100644
2392--- a/mt7996/mt7996.h
2393+++ b/mt7996/mt7996.h
2394@@ -37,6 +37,7 @@
2395 #define MT7996_EEPROM_SIZE 7680
2396 #define MT7996_EEPROM_BLOCK_SIZE 16
2397 #define MT7996_TOKEN_SIZE 16384
2398+#define MT7996_HW_TOKEN_SIZE 8192
2399
2400 #define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
2401 #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
2402@@ -49,6 +50,22 @@
2403 #define MT7996_BASIC_RATES_TBL 11
2404 #define MT7996_BEACON_RATES_TBL 25
2405
2406+#define MT7996_RRO_MAX_SESSION 1024
2407+#define MT7996_RRO_WINDOW_MAX_LEN 1024
2408+#define MT7996_RRO_ADDR_ELEM_LEN 128
2409+#define MT7996_RRO_BA_BITMAP_LEN 2
2410+#define MT7996_RRO_BA_BITMAP_CR_SIZE ((MT7996_RRO_MAX_SESSION * 128) / \
2411+ MT7996_RRO_BA_BITMAP_LEN)
2412+#define MT7996_RRO_BA_BITMAP_SESSION_SIZE (MT7996_RRO_MAX_SESSION / \
2413+ MT7996_RRO_ADDR_ELEM_LEN)
2414+#define MT7996_RRO_WINDOW_MAX_SIZE (MT7996_RRO_WINDOW_MAX_LEN * \
2415+ MT7996_RRO_BA_BITMAP_SESSION_SIZE)
2416+
2417+#define MT7996_RX_BUF_SIZE (1800 + \
2418+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2419+#define MT7996_RX_MSDU_PAGE_SIZE (128 + \
2420+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2421+
2422 struct mt7996_vif;
2423 struct mt7996_sta;
2424 struct mt7996_dfs_pulse;
2425@@ -78,6 +95,16 @@ enum mt7996_rxq_id {
2426 MT7996_RXQ_BAND0 = 4,
2427 MT7996_RXQ_BAND1 = 4,/* unused */
2428 MT7996_RXQ_BAND2 = 5,
2429+ MT7996_RXQ_RRO_BAND0 = 8,
2430+ MT7996_RXQ_RRO_BAND1 = 8,/* unused */
2431+ MT7996_RXQ_RRO_BAND2 = 6,
2432+ MT7996_RXQ_MSDU_PG_BAND0 = 10,
2433+ MT7996_RXQ_MSDU_PG_BAND1 = 11,
2434+ MT7996_RXQ_MSDU_PG_BAND2 = 12,
2435+ MT7996_RXQ_TXFREE0 = 9,
2436+ MT7996_RXQ_TXFREE1 = 9,
2437+ MT7996_RXQ_TXFREE2 = 7,
2438+ MT7996_RXQ_RRO_IND = 0,
2439 };
2440
2441 struct mt7996_twt_flow {
2442@@ -147,6 +174,15 @@ struct mt7996_hif {
2443 int irq;
2444 };
2445
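+/* hw rro address element: rx buffer head address plus per-entry state (count, oor flag, validity signature) */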
2446+struct mt7996_wed_rro_addr {
2447+ u32 head_low;
2448+ u32 head_high : 4;
2449+ u32 count : 11;
2450+ u32 oor : 1;
2451+ u32 rsv : 8;
2452+ u32 signature : 8;
2453+};
2454+
2455 struct mt7996_phy {
2456 struct mt76_phy *mt76;
2457 struct mt7996_dev *dev;
2458@@ -226,6 +262,22 @@ struct mt7996_dev {
2459 bool tbtc_support:1;
2460 bool flash_mode:1;
2461 bool has_eht:1;
2462+ bool has_rro:1;
2463+
2464+ struct {
2465+ struct {
2466+ void *ptr;
2467+ dma_addr_t phy_addr;
2468+ } ba_bitmap[MT7996_RRO_BA_BITMAP_LEN];
2469+ struct {
2470+ void *ptr;
2471+ dma_addr_t phy_addr;
2472+ } addr_elem[MT7996_RRO_ADDR_ELEM_LEN];
2473+ struct {
2474+ void *ptr;
2475+ dma_addr_t phy_addr;
2476+ } session;
2477+ } wed_rro;
2478
2479 bool ibf;
2480 u8 fw_debug_wm;
2481@@ -335,7 +387,9 @@ int mt7996_dma_init(struct mt7996_dev *dev);
2482 void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
2483 void mt7996_dma_prefetch(struct mt7996_dev *dev);
2484 void mt7996_dma_cleanup(struct mt7996_dev *dev);
2485-void mt7996_dma_start(struct mt7996_dev *dev, bool reset);
2486+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset);
2487+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
2488+ int n_desc, int ring_base, struct mtk_wed_device *wed);
2489 void mt7996_init_txpower(struct mt7996_dev *dev,
2490 struct ieee80211_supported_band *sband);
2491 int mt7996_txbf_init(struct mt7996_dev *dev);
2492@@ -495,5 +549,16 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
2493 void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2494 struct ieee80211_sta *sta, struct dentry *dir);
2495 #endif
2496+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
2497+ bool hif2, int *irq);
2498+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
2499+
2500+#ifdef CONFIG_MTK_DEBUG
2501+int mt7996_mtk_init_debugfs(struct mt7996_phy *phy, struct dentry *dir);
2502+#endif
2503+
2504+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2505+int mt7996_dma_rro_init(struct mt7996_dev *dev);
2506+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
2507
2508 #endif
2509diff --git a/mt7996/pci.c b/mt7996/pci.c
2510index c530105..92869ca 100644
2511--- a/mt7996/pci.c
2512+++ b/mt7996/pci.c
2513@@ -125,15 +125,26 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
2514 mt7996_wfsys_reset(dev);
2515 hif2 = mt7996_pci_init_hif2(pdev);
2516
2517- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
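+ /* returns 1 if a WED device was attached (irq provided by WED), 0 to fall back to PCI irq vectors, negative on error */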
2518+ ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
2519 if (ret < 0)
2520- goto free_device;
2521+ goto free_wed_or_irq_vector;
2522
2523- irq = pdev->irq;
2524- ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
2525+ if (!ret) {
2526+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2527+ if (ret < 0)
2528+ goto free_device;
2529+ }
2530+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7996_irq_handler,
2531 IRQF_SHARED, KBUILD_MODNAME, dev);
2532 if (ret)
2533- goto free_irq_vector;
2534+ goto free_wed_or_irq_vector;
2535+
2536+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
2537+ ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
2538+ IRQF_SHARED, KBUILD_MODNAME "-wed", dev);
2539+ if (ret)
2540+ goto free_irq;
2541+ }
2542
2543 mt76_wr(dev, MT_INT_MASK_CSR, 0);
2544 /* master switch of PCIe interrupt enable */
2545@@ -143,16 +154,30 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
2546 hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
2547 dev->hif2 = hif2;
2548
2549- ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
2550+ ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &irq);
2551 if (ret < 0)
2552- goto free_hif2;
2553+ goto free_irq;
2554+
2555+ if (!ret) {
2556+ ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
2557+ if (ret < 0)
2558+ goto free_hif2;
2559
2560- dev->hif2->irq = hif2_dev->irq;
2561- ret = devm_request_irq(mdev->dev, dev->hif2->irq,
2562- mt7996_irq_handler, IRQF_SHARED,
2563- KBUILD_MODNAME "-hif", dev);
2564+ dev->hif2->irq = hif2_dev->irq;
2565+ }
2566+
2567+ ret = devm_request_irq(mdev->dev, hif2_dev->irq, mt7996_irq_handler,
2568+ IRQF_SHARED, KBUILD_MODNAME "-hif", dev);
2569 if (ret)
2570- goto free_hif2_irq_vector;
2571+ goto free_hif2;
2572+
2573+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
2574+ ret = devm_request_irq(mdev->dev, irq,
2575+ mt7996_irq_handler, IRQF_SHARED,
2576+ KBUILD_MODNAME "-wed-hif", dev);
2577+ if (ret)
2578+ goto free_hif2_irq_vector;
2579+ }
2580
2581 mt76_wr(dev, MT_INT1_MASK_CSR, 0);
2582 /* master switch of PCIe interrupt enable */
2583@@ -168,15 +193,28 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
2584 free_hif2_irq:
2585 if (dev->hif2)
2586 devm_free_irq(mdev->dev, dev->hif2->irq, dev);
2587+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
2588+ devm_free_irq(mdev->dev, dev->mt76.mmio.wed_hif2.irq, dev);
2589 free_hif2_irq_vector:
2590- if (dev->hif2)
2591- pci_free_irq_vectors(hif2_dev);
2592+ if (dev->hif2) {
2593+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
2594+ mtk_wed_device_detach(&dev->mt76.mmio.wed_hif2);
2595+ else
2596+ pci_free_irq_vectors(hif2_dev);
2597+ }
2598 free_hif2:
2599 if (dev->hif2)
2600 put_device(dev->hif2->dev);
2601- devm_free_irq(mdev->dev, irq, dev);
2602-free_irq_vector:
2603- pci_free_irq_vectors(pdev);
2604+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
2605+ devm_free_irq(mdev->dev, dev->mt76.mmio.wed.irq, dev);
2606+free_irq:
2607+ devm_free_irq(mdev->dev, pdev->irq, dev);
2608+free_wed_or_irq_vector:
2609+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
2610+ mtk_wed_device_detach(&dev->mt76.mmio.wed);
2611+ else
2612+ pci_free_irq_vectors(pdev);
2613+
2614 free_device:
2615 mt76_free_device(&dev->mt76);
2616
2617diff --git a/mt7996/regs.h b/mt7996/regs.h
2618index 5702290..854390d 100644
2619--- a/mt7996/regs.h
2620+++ b/mt7996/regs.h
2621@@ -39,6 +39,38 @@ enum base_rev {
2622
2623 #define __BASE(_id, _band) (dev->reg.base[(_id)].band_base[(_band)])
2624
2625+/* RRO TOP */
2626+#define MT_RRO_TOP_BASE 0xA000
2627+#define MT_RRO_TOP(ofs) (MT_RRO_TOP_BASE + (ofs))
2628+
2629+#define MT_RRO_BA_BITMAP_BASE0 MT_RRO_TOP(0x8)
2630+#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
2631+#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
2632+#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
2633+#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
2634+#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
2635+
2636+#define MT_RRO_IND_CMD_SIGNATURE_BASE0 MT_RRO_TOP(0x38)
2637+#define MT_RRO_IND_CMD_SIGNATURE_BASE1 MT_RRO_TOP(0x3C)
2638+#define MT_RRO_IND_CMD_0_CTRL0 MT_RRO_TOP(0x40)
2639+#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN BIT(31)
2640+
2641+#define MT_RRO_PARTICULAR_CFG0 MT_RRO_TOP(0x5C)
2642+#define MT_RRO_PARTICULAR_CFG1 MT_RRO_TOP(0x60)
2643+#define MT_RRO_PARTICULAR_CONFG_EN BIT(31)
2644+#define MT_RRO_PARTICULAR_SID GENMASK(30, 16)
2645+
2646+#define MT_RRO_BA_BITMAP_BASE_EXT0 MT_RRO_TOP(0x70)
2647+#define MT_RRO_BA_BITMAP_BASE_EXT1 MT_RRO_TOP(0x74)
2648+#define MT_RRO_HOST_INT_ENA MT_RRO_TOP(0x204)
2649+#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA BIT(0)
2650+
2651+#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
2652+
2653+#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
2654+#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
2655+#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
2656+
2657 #define MT_MCU_INT_EVENT 0x2108
2658 #define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
2659 #define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
2660@@ -323,6 +355,7 @@ enum base_rev {
2661
2662 #define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
2663 #define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
2664+#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
2665
2666 #define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
2667
2668@@ -367,6 +400,9 @@ enum base_rev {
2669 #define MT_WFDMA0_PCIE1_BASE 0xd8000
2670 #define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
2671
2672+#define MT_INT_PCIE1_SOURCE_CSR_EXT MT_WFDMA0_PCIE1(0x118)
2673+#define MT_INT_PCIE1_MASK_CSR MT_WFDMA0_PCIE1(0x11c)
2674+
2675 #define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
2676 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
2677 #define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
2678@@ -387,6 +423,7 @@ enum base_rev {
2679 #define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
2680 #define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
2681 #define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
2682+#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
2683
2684 #define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
2685 MT_MCUQ_ID(q) * 0x4)
2686@@ -412,6 +449,15 @@ enum base_rev {
2687 #define MT_INT_RX_TXFREE_MAIN BIT(17)
2688 #define MT_INT_RX_TXFREE_TRI BIT(15)
2689 #define MT_INT_MCU_CMD BIT(29)
2690+#define MT_INT_RX_TXFREE_EXT BIT(26)
2691+
2692+#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
2693+#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
2694+#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
2695+#define MT_INT_RX_DONE_RRO_IND BIT(11)
2696+#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
2697+#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
2698+#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
2699
2700 #define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
2701 #define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
2702@@ -420,20 +466,31 @@ enum base_rev {
2703 MT_INT_RX(MT_RXQ_MCU_WA))
2704
2705 #define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
2706- MT_INT_RX(MT_RXQ_MAIN_WA))
2707+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
2708+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
2709
2710 #define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
2711 MT_INT_RX(MT_RXQ_BAND1_WA) | \
2712- MT_INT_RX(MT_RXQ_MAIN_WA))
2713+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
2714+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
2715
2716 #define MT_INT_BAND2_RX_DONE (MT_INT_RX(MT_RXQ_BAND2) | \
2717 MT_INT_RX(MT_RXQ_BAND2_WA) | \
2718- MT_INT_RX(MT_RXQ_MAIN_WA))
2719+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
2720+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
2721+
2722+#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
2723+ MT_INT_RX(MT_RXQ_RRO_BAND1) | \
2724+ MT_INT_RX(MT_RXQ_RRO_BAND2) | \
2725+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
2726+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
2727+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
2728
2729 #define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
2730 MT_INT_BAND0_RX_DONE | \
2731 MT_INT_BAND1_RX_DONE | \
2732- MT_INT_BAND2_RX_DONE)
2733+ MT_INT_BAND2_RX_DONE | \
2734+ MT_INT_RRO_RX_DONE)
2735
2736 #define MT_INT_TX_DONE_FWDL BIT(26)
2737 #define MT_INT_TX_DONE_MCU_WM BIT(27)
2738--
27392.18.0
2740