From 017ed7925cbdfb41d3d85fed54a97cff9fcf2f78 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:50:56 +0800
Subject: [PATCH] wifi: mt76: mt7996: wed: add wed3.0 rx support

Add hardware RRO (Rx reorder offload) support. This is a preliminary
patch for WED 3.0 support.

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: I7e113b1392bcf085ec02c8a44ffbb7cf7c3fa027
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
 dma.c           | 197 +++++++++++++++++++++++++++++++++++++-----------
 dma.h           |  12 +++
 mac80211.c      |   1 +
 mt76.h          |  63 ++++++++++++++--
 mt7996/dma.c    | 163 +++++++++++++++++++++++++++++++++------
 mt7996/init.c   | 124 +++++++++++++++++++++++++++++-
 mt7996/mac.c    |  42 +++++++++--
 mt7996/mcu.c    |   8 +-
 mt7996/mmio.c   |  36 +++++++--
 mt7996/mt7996.h |  58 ++++++++++++++
 mt7996/regs.h   |  63 +++++++++++++++-
 11 files changed, 675 insertions(+), 92 deletions(-)

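Reviewer note (illustration only, not part of the change): the RRO ind
cmd ring has no DMA-done bit. Ownership is tracked through the 3-bit
magic_cnt field of struct mt76_rro_ind instead; mt76_dma_alloc_queue()
below pre-fills every slot with MT_DMA_IND_CMD_MAGIC_CNT - 1 so all
slots read as stale until the first hardware write. A consumer could
detect a freshly written slot roughly as in the sketch below;
mt76_rro_ind_cmd_valid() is a hypothetical helper, the real poll loop
is not part of this patch.

static bool mt76_rro_ind_cmd_valid(struct mt76_queue *q)
{
	struct mt76_rro_ind *cmd;

	cmd = (struct mt76_rro_ind *)&q->rro_desc[q->head];

	/* HW advances magic_cnt modulo MT_DMA_IND_CMD_MAGIC_CNT each time
	 * it rewrites a slot; SW keeps the expected value in q->magic_cnt
	 * and would bump it whenever q->head wraps back to 0.
	 */
	return cmd->magic_cnt == q->magic_cnt;
}
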
diff --git a/dma.c b/dma.c
index 930ec768..e5b4d898 100644
--- a/dma.c
+++ b/dma.c
@@ -193,46 +193,65 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	int ndesc = q->ndesc;
+
+	if (q->flags & MT_QFLAG_MAGIC)
+		ndesc |= MT_DMA_MAGIC_EN;
+
 	Q_WRITE(dev, q, desc_base, q->desc_dma);
-	Q_WRITE(dev, q, ring_size, q->ndesc);
+	Q_WRITE(dev, q, ring_size, ndesc);
 	q->head = Q_READ(dev, q, dma_idx);
 	q->tail = q->head;
 }
 
 static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool skip)
 {
 	int i;
 
 	if (!q || !q->ndesc)
 		return;
 
+	if (!q->desc)
+		goto done;
+
 	/* clear descriptors */
 	for (i = 0; i < q->ndesc; i++)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
+	if (skip)
+		goto sync;
+
+done:
 	Q_WRITE(dev, q, cpu_idx, 0);
 	Q_WRITE(dev, q, dma_idx, 0);
+sync:
 	mt76_dma_sync_idx(dev, q);
 }
 
 static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
-		    struct mt76_queue_buf *buf, void *data)
+		    struct mt76_queue_buf *buf, void *data,
+		    struct mt76_rxwi_cache *rxwi)
 {
-	struct mt76_desc *desc = &q->desc[q->head];
+	struct mt76_desc *desc;
 	struct mt76_queue_entry *entry = &q->entry[q->head];
-	struct mt76_rxwi_cache *rxwi = NULL;
 	u32 buf1 = 0, ctrl;
 	int idx = q->head;
 	int rx_token;
 
+	if (mt76_queue_is_rro_ind(q))
+		goto done;
+
+	desc = &q->desc[q->head];
 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
 	if (mt76_queue_is_wed_rx(q)) {
-		rxwi = mt76_get_rxwi(dev);
-		if (!rxwi)
-			return -ENOMEM;
+		if (!rxwi) {
+			rxwi = mt76_get_rxwi(dev);
+			if (!rxwi)
+				return -ENOMEM;
+		}
 
 		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
 		if (rx_token < 0) {
@@ -249,6 +268,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 	WRITE_ONCE(desc->info, 0);
 
+done:
 	entry->dma_addr[0] = buf->addr;
 	entry->dma_len[0] = buf->len;
 	entry->rxwi = rxwi;
@@ -396,14 +416,15 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 
 static void *
 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
-		 int *len, u32 *info, bool *more, bool *drop)
+		 int *len, u32 *info, bool *more, bool *drop, bool flush)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
 	struct mt76_desc *desc = &q->desc[idx];
 	void *buf;
+	u32 ctrl;
 
+	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
 	if (len) {
-		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
 		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
 	}
@@ -411,6 +432,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (info)
 		*info = le32_to_cpu(desc->info);
 
+	if (drop) {
+		*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
+		if (ctrl & MT_DMA_CTL_VER_MASK)
+			*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
+	}
+
 	if (mt76_queue_is_wed_rx(q)) {
 		u32 buf1 = le32_to_cpu(desc->buf1);
 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
@@ -423,20 +450,46 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 					SKB_WITH_OVERHEAD(q->buf_size),
 					DMA_FROM_DEVICE);
 
-		buf = r->ptr;
-		r->dma_addr = 0;
-		r->ptr = NULL;
-
-		mt76_put_rxwi(dev, r);
-
-		if (drop) {
-			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
-			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
-					   MT_DMA_CTL_DROP));
+		if (flush) {
+			buf = r->ptr;
+			r->dma_addr = 0;
+			r->ptr = NULL;
+
+			mt76_put_rxwi(dev, r);
+		} else {
+			struct mt76_queue_buf qbuf;
+
+			buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+			if (!buf)
+				return NULL;
+
+			memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
+
+			r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+						     SKB_WITH_OVERHEAD(q->buf_size),
+						     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+
+			qbuf.addr = r->dma_addr;
+			qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+			qbuf.skip_unmap = false;
+
+			if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
+				dma_unmap_single(dev->dma_dev, r->dma_addr,
+						 SKB_WITH_OVERHEAD(q->buf_size),
+						 DMA_FROM_DEVICE);
+				skb_free_frag(r->ptr);
+				mt76_put_rxwi(dev, r);
+				return NULL;
+			}
+		}
 
+		if (drop)
 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
-		}
 	} else {
 		buf = e->buf;
 		e->buf = NULL;
@@ -458,15 +511,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	if (!q->queued)
 		return NULL;
 
-	if (flush)
-		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
-	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
-		return NULL;
+	if (q->flags & MT_QFLAG_RRO) {
+		goto done;
+	} else {
+		if (flush)
+			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+			return NULL;
+	}
 
+done:
 	q->tail = (q->tail + 1) % q->ndesc;
 	q->queued--;
 
-	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
 }
 
 static int
@@ -615,7 +673,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf;
-		void *buf;
+		void *buf = NULL;
+
+		if (mt76_queue_is_rro_ind(q))
+			goto done;
 
 		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
@@ -627,10 +688,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 			break;
 		}
 
+done:
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
 			dma_unmap_single(dev->dma_dev, addr, len,
 					 DMA_FROM_DEVICE);
 			skb_free_frag(buf);
@@ -639,7 +701,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		frames++;
 	}
 
-	if (frames)
+	if (frames || mt76_queue_is_wed_rx(q))
 		mt76_dma_kick_queue(dev, q);
 
 	spin_unlock_bh(&q->lock);
@@ -652,7 +714,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 	struct mtk_wed_device *wed = &dev->mmio.wed;
 	int ret, type, ring;
-	u8 flags;
+	u16 flags;
 
 	if (!q || !q->ndesc)
 		return -EINVAL;
@@ -679,7 +741,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 	case MT76_WED_Q_TXFREE:
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
-		mt76_dma_queue_reset(dev, q);
+		mt76_dma_queue_reset(dev, q, false);
 		mt76_dma_rx_fill(dev, q);
 		q->flags = flags;
 
@@ -688,9 +750,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 			q->wed_regs = wed->txfree_ring.reg_base;
 		break;
 	case MT76_WED_Q_RX:
-		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
-		if (!ret)
-			q->wed_regs = wed->rx_ring[ring].reg_base;
+		if (q->flags & MT_QFLAG_RRO) {
+			q->flags &= ~0x1f;
+
+			ring = FIELD_GET(MT_QFLAG_RRO_RING, q->flags);
+			type = FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags);
+			if (type == MT76_RRO_Q_DATA) {
+				mt76_dma_queue_reset(dev, q, true);
+				ret = mtk_wed_device_rro_rx_ring_setup(wed, ring, q->regs);
+			} else if (type == MT76_RRO_Q_MSDU_PG) {
+				mt76_dma_queue_reset(dev, q, true);
+				ret = mtk_wed_device_msdu_pg_rx_ring_setup(wed, ring, q->regs);
+			} else if (type == MT76_RRO_Q_IND) {
+				mt76_dma_queue_reset(dev, q, false);
+				mt76_dma_rx_fill(dev, q);
+				ret = mtk_wed_device_ind_rx_ring_setup(wed, q->regs);
+			}
+			if (type != MT76_RRO_Q_IND) {
+				q->head = q->ndesc - 1;
+				q->queued = q->ndesc - 1;
+			}
+		} else {
+			ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, 0);
+			if (!ret)
+				q->wed_regs = wed->rx_ring[ring].reg_base;
+		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -719,10 +803,25 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	q->hw_idx = idx;
 
 	size = q->ndesc * sizeof(struct mt76_desc);
+	if (mt76_queue_is_rro_ind(q))
+		size = q->ndesc * sizeof(struct mt76_rro_desc);
+
 	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
 	if (!q->desc)
 		return -ENOMEM;
 
+	if (mt76_queue_is_rro_ind(q)) {
+		struct mt76_rro_ind *cmd;
+		int i;
+
+		q->rro_desc = (struct mt76_rro_desc *)(q->desc);
+		q->desc = NULL;
+		for (i = 0; i < q->ndesc; i++) {
+			cmd = (struct mt76_rro_ind *)&q->rro_desc[i];
+			cmd->magic_cnt = MT_DMA_IND_CMD_MAGIC_CNT - 1;
+		}
+	}
+
 	size = q->ndesc * sizeof(*q->entry);
 	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
 	if (!q->entry)
@@ -732,8 +831,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	if (ret)
 		return ret;
 
-	if (!mt76_queue_is_txfree(q))
-		mt76_dma_queue_reset(dev, q);
+	if (!mtk_wed_device_active(&dev->mmio.wed) ||
+	    (!mt76_queue_is_wed_txfree(q) &&
+	     !(mtk_wed_get_rx_capa(&dev->mmio.wed) &&
+	       q->flags & MT_QFLAG_RRO)))
+		mt76_dma_queue_reset(dev, q, false);
 
 	return 0;
 }
@@ -751,13 +853,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	do {
+		if (q->flags & MT_QFLAG_RRO)
+			break;
+
 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
 		if (!buf)
 			break;
 
-		if (q->flags & MT_QFLAG_RRO)
-			continue;
-
 		skb_free_frag(buf);
 	} while (1);
 
@@ -768,8 +870,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 
 	spin_unlock_bh(&q->lock);
 
-	if (((q->flags & MT_QFLAG_WED) &&
-	     FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) ||
+	if (mt76_queue_is_wed_rx(q) ||
 	    (q->flags & MT_QFLAG_RRO))
 		return;
 
@@ -790,9 +891,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	if (!q->ndesc)
 		return;
 
+	if (!q->desc)
+		goto done;
+
 	for (i = 0; i < q->ndesc; i++)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
+done:
 	mt76_dma_rx_cleanup(dev, q);
 
 	/* reset WED rx queues */
@@ -839,8 +944,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 	bool check_ddone = false;
 	bool more;
 
-	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
-	    q->flags == MT_WED_Q_TXFREE) {
+	if ((IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+	     q->flags == MT_WED_Q_TXFREE)) {
 		dma_idx = Q_READ(dev, q, dma_idx);
 		check_ddone = true;
 	}
@@ -1002,7 +1107,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	mt76_for_each_q_rx(dev, i) {
 		struct mt76_queue *q = &dev->q_rx[i];
 
-		if (mt76_queue_is_wed_rx(q))
+		if (mtk_wed_device_active(&dev->mmio.wed) &&
+		    (q->flags & MT_QFLAG_RRO))
 			continue;
 
 		netif_napi_del(&dev->napi[i]);
@@ -1014,6 +1120,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 
 	if (mtk_wed_device_active(&dev->mmio.wed_ext))
 		mtk_wed_device_detach(&dev->mmio.wed_ext);
+
 	mt76_free_pending_txwi(dev);
 	mt76_free_pending_rxwi(dev);
 }
diff --git a/dma.h b/dma.h
index 1b090d78..48037092 100644
--- a/dma.h
+++ b/dma.h
@@ -25,6 +25,13 @@
 #define MT_DMA_PPE_ENTRY	GENMASK(30, 16)
 #define MT_DMA_INFO_PPE_VLD	BIT(31)
 
+#define MT_DMA_CTL_PN_CHK_FAIL	BIT(13)
+#define MT_DMA_CTL_VER_MASK	BIT(7)
+
+#define MT_DMA_MAGIC_EN		BIT(13)
+
+#define MT_DMA_IND_CMD_MAGIC_CNT	8
+
 #define MT_DMA_HDR_LEN			4
 #define MT_RX_INFO_LEN			4
 #define MT_FCE_INFO_LEN			4
@@ -37,6 +44,11 @@ struct mt76_desc {
 	__le32 info;
 } __packed __aligned(4);
 
+struct mt76_rro_desc {
+	__le32 buf0;
+	__le32 buf1;
+} __packed __aligned(4);
+
 enum mt76_qsel {
 	MT_QSEL_MGMT,
 	MT_QSEL_HCCA,
diff --git a/mac80211.c b/mac80211.c
index f7578308..3a5755f9 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -727,6 +727,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 			return;
 		}
 	}
+
 	__skb_queue_tail(&dev->rx_skb[q], skb);
 }
 
diff --git a/mt76.h b/mt76.h
index ee0dbdd7..e4351338 100644
--- a/mt76.h
+++ b/mt76.h
@@ -48,6 +48,18 @@
 
 #define MT76_TOKEN_FREE_THR	64
 
+#define MT_QFLAG_RRO_RING	GENMASK(6, 5)
+#define MT_QFLAG_RRO_TYPE	GENMASK(8, 7)
+#define MT_QFLAG_RRO		BIT(9)
+#define MT_QFLAG_MAGIC		BIT(10)
+
+#define __MT_RRO_Q(_type, _n)	(MT_QFLAG_RRO | \
+				 FIELD_PREP(MT_QFLAG_RRO_TYPE, _type) | \
+				 FIELD_PREP(MT_QFLAG_RRO_RING, _n))
+#define MT_RRO_Q_DATA(_n)	__MT_RRO_Q(MT76_RRO_Q_DATA, _n)
+#define MT_RRO_Q_MSDU_PG(_n)	__MT_RRO_Q(MT76_RRO_Q_MSDU_PG, _n)
+#define MT_RRO_Q_IND		__MT_RRO_Q(MT76_RRO_Q_IND, 0)
+
 #define MT_QFLAG_WED_RING	GENMASK(1, 0)
 #define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
 #define MT_QFLAG_WED		BIT(4)
@@ -82,6 +94,12 @@ enum mt76_wed_type {
 	MT76_WED_Q_RX,
 };
 
+enum mt76_rro_type {
+	MT76_RRO_Q_DATA,
+	MT76_RRO_Q_MSDU_PG,
+	MT76_RRO_Q_IND,
+};
+
 struct mt76_bus_ops {
 	u32 (*rr)(struct mt76_dev *dev, u32 offset);
 	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -128,6 +146,16 @@ enum mt76_rxq_id {
 	MT_RXQ_MAIN_WA,
 	MT_RXQ_BAND2,
 	MT_RXQ_BAND2_WA,
+	MT_RXQ_RRO_BAND0,
+	MT_RXQ_RRO_BAND1,
+	MT_RXQ_RRO_BAND2,
+	MT_RXQ_MSDU_PAGE_BAND0,
+	MT_RXQ_MSDU_PAGE_BAND1,
+	MT_RXQ_MSDU_PAGE_BAND2,
+	MT_RXQ_TXFREE_BAND0,
+	MT_RXQ_TXFREE_BAND1,
+	MT_RXQ_TXFREE_BAND2,
+	MT_RXQ_RRO_IND,
 	__MT_RXQ_MAX
 };
 
@@ -206,6 +234,7 @@ struct mt76_queue {
 	spinlock_t lock;
 	spinlock_t cleanup_lock;
 	struct mt76_queue_entry *entry;
+	struct mt76_rro_desc *rro_desc;
 	struct mt76_desc *desc;
 
 	u16 first;
@@ -219,8 +248,8 @@ struct mt76_queue {
 
 	u8 buf_offset;
 	u8 hw_idx;
-	u8 flags;
-
+	u8 magic_cnt;
+	u32 flags;
 	u32 wed_regs;
 
 	dma_addr_t desc_dma;
@@ -274,7 +303,7 @@ struct mt76_queue_ops {
 
 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
 
-	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q, bool skip);
 };
 
 enum mt76_phy_type {
@@ -369,6 +398,17 @@ struct mt76_txq {
 	bool aggr;
 };
 
+struct mt76_rro_ind {
+	u32 se_id	: 12;
+	u32 rsv		: 4;
+	u32 start_sn	: 12;
+	u32 ind_reason	: 4;
+	u32 ind_cnt	: 13;
+	u32 win_sz	: 3;
+	u32 rsv2	: 13;
+	u32 magic_cnt	: 3;
+};
+
 struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;
@@ -1516,12 +1556,19 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 	return (q->flags & MT_QFLAG_WED) &&
 	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
 }
-static inline bool mt76_queue_is_txfree(struct mt76_queue *q)
+
+static inline bool mt76_queue_is_wed_txfree(struct mt76_queue *q)
 {
 	return (q->flags & MT_QFLAG_WED) &&
 	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
 }
 
+static inline bool mt76_queue_is_rro_ind(struct mt76_queue *q)
+{
+	return (q->flags & MT_QFLAG_RRO) &&
+	       FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags) == MT76_RRO_Q_IND;
+}
+
 struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
@@ -1540,10 +1587,14 @@ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 static inline int
 mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 {
-	int token;
+	int token, start = 0;
+
+	if (mtk_wed_device_active(&dev->mmio.wed))
+		start = dev->mmio.wed.wlan.nbuf;
 
 	spin_lock_bh(&dev->token_lock);
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
+	token = idr_alloc(&dev->token, *ptxwi, start, start + dev->token_size,
+			  GFP_ATOMIC);
 	spin_unlock_bh(&dev->token_lock);
 
 	return token;
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 428f3d08..45ccc7b5 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -64,6 +64,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
 	RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
 	RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
 
+	if (dev->rro_support) {
+		/* band0 */
+		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
+			   MT7996_RXQ_RRO_BAND0);
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
+			   MT7996_RXQ_MSDU_PG_BAND0);
+		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
+			   MT7996_RXQ_TXFREE0);
+		/* band1 */
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
+			   MT7996_RXQ_MSDU_PG_BAND1);
+		/* band2 */
+		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
+			   MT7996_RXQ_RRO_BAND2);
+		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
+			   MT7996_RXQ_MSDU_PG_BAND2);
+		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
+			   MT7996_RXQ_TXFREE2);
+
+		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
+			   MT7996_RXQ_RRO_IND);
+	}
+
 	/* data tx queue */
 	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
 	TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
@@ -102,6 +125,22 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
 	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x2));
 	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
 	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x10));
+	if (dev->rro_support) {
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
+			PREFETCH(0x10));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
+			PREFETCH(0x10));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
+			PREFETCH(0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+			PREFETCH(0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+			PREFETCH(0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
+			PREFETCH(0x4));
+		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
+			PREFETCH(0x4));
+	}
 #undef PREFETCH
 
 	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
@@ -161,6 +200,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
 
 void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
 {
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 	u32 hif1_ofs = 0;
 	u32 irq_mask;
 
@@ -169,11 +209,16 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
 
 	/* enable wpdma tx/rx */
 	if (!reset) {
-		mt76_set(dev, MT_WFDMA0_GLO_CFG,
-			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
-			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
-			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
-			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+			mt76_set(dev, MT_WFDMA0_GLO_CFG,
+				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO);
+		else
+			mt76_set(dev, MT_WFDMA0_GLO_CFG,
+				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
 
 		if (dev->hif2)
 			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -185,8 +230,8 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
 
 	/* enable interrupts for TX/RX rings */
 	irq_mask = MT_INT_MCU_CMD |
-		MT_INT_RX_DONE_MCU |
-		MT_INT_TX_DONE_MCU;
+		   MT_INT_RX_DONE_MCU |
+		   MT_INT_TX_DONE_MCU;
 
 	if (mt7996_band_valid(dev, MT_BAND0))
 		irq_mask |= MT_INT_BAND0_RX_DONE;
@@ -197,14 +242,14 @@ void __mt7996_dma_enable(struct mt7996_dev *dev, bool reset, bool wed_reset)
 	if (mt7996_band_valid(dev, MT_BAND2))
 		irq_mask |= MT_INT_BAND2_RX_DONE;
 
-	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+	if (mtk_wed_device_active(wed) && wed_reset) {
 		u32 wed_irq_mask = irq_mask;
 
 		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
 
 		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
 
-		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+		mtk_wed_device_start(wed, wed_irq_mask);
 	}
 
 	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
@@ -298,7 +343,8 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 	/* fix hardware limitation, pcie1's rx ring3 is not available
 	 * so, redirect pcie0 rx ring3 interrupt to pcie1
 	 */
-	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support)
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+	    dev->rro_support)
 		mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
 			 MT_WFDMA0_RX_INT_SEL_RING6);
 	else
@@ -311,6 +357,78 @@ static int mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
 	return 0;
 }
 
+int mt7996_dma_rro_init(struct mt7996_dev *dev)
+{
+	int ret;
+	u32 hif1_ofs = 0;
+	u32 wed_irq_mask;
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+	if (dev->hif2)
+		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+	/* ind cmd */
+	dev->mt76.q_rx[MT_RXQ_RRO_IND].flags = MT_RRO_Q_IND;
+	dev->mt76.q_rx[MT_RXQ_RRO_IND].flags |= MT_WED_Q_RX(0);
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_IND],
+			       MT_RXQ_ID(MT_RXQ_RRO_IND),
+			       MT7996_RX_RING_SIZE,
+			       0, MT_RXQ_RRO_IND_RING_BASE);
+	if (ret)
+		return ret;
+
+	/* rx msdu page queue for band0 */
+	dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags = MT_RRO_Q_MSDU_PG(0);
+	dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_QFLAG_MAGIC;
+	dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_WED_Q_RX(0);
+	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+			       MT7996_RX_RING_SIZE,
+			       MT7996_RX_MSDU_PAGE_SIZE,
+			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
+	if (ret)
+		return ret;
+
+	if (mt7996_band_valid(dev, MT_BAND1)) {
+		/* rx msdu page queue for band1 */
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags = MT_RRO_Q_MSDU_PG(1);
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_QFLAG_MAGIC;
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_WED_Q_RX(1);
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_MSDU_PAGE_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
+		if (ret)
+			return ret;
+	}
+
+	if (mt7996_band_valid(dev, MT_BAND2)) {
+		/* rx msdu page queue for band2 */
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags = MT_RRO_Q_MSDU_PG(2);
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_QFLAG_MAGIC;
+		dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_WED_Q_RX(0);
+		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+				       MT7996_RX_RING_SIZE,
+				       MT7996_RX_MSDU_PAGE_SIZE,
+				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
+		if (ret)
+			return ret;
+	}
+
+	wed_irq_mask = dev->mt76.mmio.irqmask |
+		       MT_INT_RRO_RX_DONE |
+		       MT_INT_TX_DONE_BAND2;
+
+	mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+	mtk_wed_device_start_hwrro(wed, wed_irq_mask, false);
+	mt7996_irq_enable(dev, wed_irq_mask);
+
+	return 0;
+}
+
 int mt7996_dma_init(struct mt7996_dev *dev)
 {
 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
@@ -380,6 +498,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 		return ret;
 
 	/* rx data queue for band0 and band1 */
+	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
+
 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
 			       MT_RXQ_ID(MT_RXQ_MAIN),
 			       MT7996_RX_RING_SIZE,
@@ -403,9 +524,6 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 	if (mt7996_band_valid(dev, MT_BAND2)) {
 		/* rx data queue for band2 */
 		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
-		if (mtk_wed_device_active(wed))
-			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
-
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
 				       MT_RXQ_ID(MT_RXQ_BAND2),
 				       MT7996_RX_RING_SIZE,
@@ -429,11 +547,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 		return ret;
 	}
 
-
-	if (dev->rro_support) {
+	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+	    dev->rro_support) {
 		/* rx rro data queue for band0 */
 		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0);
 		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC;
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_WED_Q_RX(0);
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
 				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
 				       MT7996_RX_RING_SIZE,
@@ -443,8 +562,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 			return ret;
 
 		/* tx free notify event from WA for band0 */
-		if (mtk_wed_device_active(wed))
-			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
 				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
 				       MT7996_RX_MCU_RING_SIZE,
@@ -457,6 +575,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
 		/* rx rro data queue for band2 */
 		dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1);
 		dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC;
+		dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_WED_Q_RX(1);
 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
 				       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
 				       MT7996_RX_RING_SIZE,
@@ -534,18 +653,18 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
 
 	/* reset hw queues */
 	for (i = 0; i < __MT_TXQ_MAX; i++) {
-		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+		mt76_queue_reset(dev, dev->mphy.q_tx[i], false);
 		if (phy2)
-			mt76_queue_reset(dev, phy2->q_tx[i]);
+			mt76_queue_reset(dev, phy2->q_tx[i], false);
 		if (phy3)
-			mt76_queue_reset(dev, phy3->q_tx[i]);
+			mt76_queue_reset(dev, phy3->q_tx[i], false);
 	}
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
-		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+		mt76_queue_reset(dev, dev->mt76.q_mcu[i], false);
 
 	mt76_for_each_q_rx(&dev->mt76, i) {
-		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+		mt76_queue_reset(dev, &dev->mt76.q_rx[i], false);
 	}
 
 	mt76_tx_status_check(&dev->mt76, true);
diff --git a/mt7996/init.c b/mt7996/init.c
index 6cfbc50d..d70dcf9f 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -496,8 +496,13 @@ void mt7996_mac_init(struct mt7996_dev *dev)
 
 	/* rro module init */
 	mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
-	mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
-	mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+	if (dev->rro_support) {
+		mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
+		mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+	} else {
+		mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
+		mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+	}
 
 	mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
 			  MCU_WA_PARAM_HW_PATH_HIF_VER,
@@ -650,6 +655,114 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
 	msleep(20);
 }
 
+static int mt7996_rro_init(struct mt7996_dev *dev)
+{
+	struct mt7996_rro_addr *ptr;
+	struct mt7996_rro_cfg *rro = &dev->rro;
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+	u32 size, val = 0, reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+	int i, j;
+	void *buf;
+
+	for (i = 0; i < MT7996_RRO_BA_BITMAP_CR_CNT; i++) {
+		buf = dmam_alloc_coherent(dev->mt76.dma_dev,
+					  MT7996_BA_BITMAP_SZ_PER_CR,
+					  &rro->ba_bitmap_cache_pa[i],
+					  GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		rro->ba_bitmap_cache_va[i] = buf;
+	}
+
+	rro->win_sz = MT7996_RRO_WIN_SIZE_MAX;
+	for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
+		size = MT7996_RRO_SESSION_PER_CR *
+		       rro->win_sz * sizeof(struct mt7996_rro_addr);
+
+		buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
+					  &rro->addr_elem_alloc_pa[i],
+					  GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		rro->addr_elem_alloc_va[i] = buf;
+
+		memset(rro->addr_elem_alloc_va[i], 0, size);
+
+		ptr = rro->addr_elem_alloc_va[i];
+		for (j = 0; j < MT7996_RRO_SESSION_PER_CR * rro->win_sz; j++, ptr++)
+			ptr->signature = 0xff;
+
+		wed->wlan.ind_cmd.addr_elem_phys[i] = rro->addr_elem_alloc_pa[i];
+	}
+
+	rro->particular_se_id = MT7996_RRO_SESSION_MAX;
+	size = rro->win_sz * sizeof(struct mt7996_rro_addr);
+	buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
+				  &rro->particular_session_pa,
+				  GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rro->particular_session_va = buf;
+	ptr = rro->particular_session_va;
+	for (j = 0; j < rro->win_sz; j++, ptr++)
+		ptr->signature = 0xff;
+
+	INIT_LIST_HEAD(&rro->pg_addr_cache);
+	for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&rro->pg_hash_head[i]);
+
+	/* rro hw init */
+	/* TODO: remove line after WM has set */
+	mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+	/* setup BA bitmap cache address */
+	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+		rro->ba_bitmap_cache_pa[0]);
+	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+		rro->ba_bitmap_cache_pa[1]);
+	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+	/* setup Address element address */
+	for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
+		mt76_wr(dev, reg, rro->addr_elem_alloc_pa[i] >> 4);
+		reg += 4;
+	}
+
+	/* setup Address element address - separate address segment mode */
+	mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+		MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+
+	wed->wlan.ind_cmd.win_size = ffs(rro->win_sz) - 6;
+	wed->wlan.ind_cmd.particular_sid = rro->particular_se_id;
+	wed->wlan.ind_cmd.particular_se_phys = rro->particular_session_pa;
+	wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_CR_CNT;
+	wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+
+	mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+	mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+		 MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+
+	/* particular session configure */
+	/* use max session idx + 1 as particular session id */
+	mt76_wr(dev, MT_RRO_PARTICULAR_CFG0,
+		rro->particular_session_pa);
+
+	val = FIELD_PREP(MT_RRO_PARTICULAR_SID,
+			 MT7996_RRO_SESSION_MAX);
+	val |= MT_RRO_PARTICULAR_CONFG_EN;
+	mt76_wr(dev, MT_RRO_PARTICULAR_CFG1, val);
+
+	/* interrupt enable */
+	mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+		MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+
+	/* rro ind cmd queue init */
+	return mt7996_dma_rro_init(dev);
+}
+
 static int mt7996_init_hardware(struct mt7996_dev *dev)
 {
 	int ret, idx;
@@ -677,6 +790,13 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
 	if (ret)
 		return ret;
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+	    dev->rro_support) {
+		ret = mt7996_rro_init(dev);
+		if (ret)
+			return ret;
+	}
+
 	ret = mt7996_eeprom_init(dev);
 	if (ret < 0)
 		return ret;
diff --git a/mt7996/mac.c b/mt7996/mac.c
index fc2d9269..4fbbc077 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -614,8 +614,37 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
 	return 0;
 }
 
+static void
+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
+		     struct mt7996_sta *msta, struct sk_buff *skb,
+		     u32 info)
+{
+	struct ieee80211_vif *vif;
+	struct wireless_dev *wdev;
+
+	if (!msta || !msta->vif)
+		return;
+
+	if (!mt76_queue_is_wed_rx(q))
+		return;
+
+	if (!(info & MT_DMA_INFO_PPE_VLD))
+		return;
+
+	vif = container_of((void *)msta->vif, struct ieee80211_vif,
+			   drv_priv);
+	wdev = ieee80211_vif_to_wdev(vif);
+	skb->dev = wdev->netdev;
+
+	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
+
 static int
-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
+		   struct sk_buff *skb, u32 *info)
 {
 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 	struct mt76_phy *mphy = &dev->mt76.phy;
@@ -640,7 +669,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
 	u16 seq_ctrl = 0;
 	__le16 fc = 0;
 	int idx;
+	u8 hw_aggr = false;
+	struct mt7996_sta *msta = NULL;
 
+	hw_aggr = status->aggr;
 	memset(status, 0, sizeof(*status));
 
 	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
@@ -667,8 +699,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
 	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
 
 	if (status->wcid) {
-		struct mt7996_sta *msta;
-
 		msta = container_of(status->wcid, struct mt7996_sta, wcid);
 		spin_lock_bh(&dev->sta_poll_lock);
 		if (list_empty(&msta->poll_list))
@@ -871,12 +901,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
 #endif
 	} else {
 		status->flag |= RX_FLAG_8023;
+		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+				     *info);
 	}
 
 	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
 		mt7996_mac_decode_he_radiotap(skb, rxv, mode);
 
-	if (!status->wcid || !ieee80211_is_data_qos(fc))
+	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
 		return 0;
 
 	status->aggr = unicast &&
@@ -1604,7 +1636,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 		dev_kfree_skb(skb);
 		break;
 	case PKT_TYPE_NORMAL:
-		if (!mt7996_mac_fill_rx(dev, skb)) {
+		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
 			mt76_rx(&dev->mt76, q, skb);
 			return;
 		}
diff --git a/mt7996/mcu.c b/mt7996/mcu.c
index 59f22f6d..1891c0d7 100644
--- a/mt7996/mcu.c
+++ b/mt7996/mcu.c
@@ -949,7 +949,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
 static int
 mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
 		  struct ieee80211_ampdu_params *params,
-		  bool enable, bool tx)
+		  bool enable, bool tx, bool rro_enable)
 {
 	struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
 	struct sta_rec_ba_uni *ba;
@@ -970,6 +970,8 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
 	ba->ba_en = enable << params->tid;
 	ba->amsdu = params->amsdu;
 	ba->tid = params->tid;
+	if (rro_enable && !tx && enable)
+		ba->ba_rdd_rro = true;
 
 	return mt76_mcu_skb_send_msg(dev, skb,
 				     MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
@@ -987,7 +989,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
 	msta->wcid.amsdu = false;
 
 	return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
-				 enable, true);
+				 enable, true, dev->rro_support);
 }
 
 int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
@@ -998,7 +1000,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
 	struct mt7996_vif *mvif = msta->vif;
 
 	return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
-				 enable, false);
+				 enable, false, dev->rro_support);
 }
 
 static void
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index b9e47e73..9960dca7 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
@@ -346,9 +346,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
 			wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
 		}
 
+		wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
+		wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
+				     MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+				     MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
 		wed->wlan.chip_id = 0x7991;
 		wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
 	} else {
+		wed->wlan.hwrro = dev->rro_support; /* default on */
 		wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
 		wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
 		wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
@@ -360,13 +366,33 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
 				     MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
 				     MT7996_RXQ_BAND0 * MT_RING_SIZE;
 
+		wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
+					    MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
+					    MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
+		wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+					    MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+					    MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+		wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
+					MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
+					MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
+
 		wed->wlan.rx_nbuf = 65536;
 		wed->wlan.rx_npkt = 24576;
+		if (dev->hif2)
+			wed->wlan.rx_npkt += 8192;
+
 		wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
 
 		wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
 		wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
 
+		wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
+		wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+
+		wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
+		wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
+		wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1;
+
 		wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
 		wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
 		if (dev->rro_support) {
@@ -378,6 +404,8 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
 			wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
 						 MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
 		}
+
+		dev->mt76.rx_token_size += wed->wlan.rx_npkt;
 	}
 
 	wed->wlan.nbuf = 16384;
@@ -394,8 +422,6 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
 	wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
 	wed->wlan.update_wo_rx_stats = NULL;
 
-	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
-
 	if (mtk_wed_device_attach(wed))
 		return 0;
 
@@ -557,10 +583,9 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
 irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
 {
 	struct mt7996_dev *dev = dev_instance;
-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 
-	if (mtk_wed_device_active(wed))
-		mtk_wed_device_irq_set_mask(wed, 0);
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0);
 	else
 		mt76_wr(dev, MT_INT_MASK_CSR, 0);
 
@@ -592,6 +617,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
 			       SURVEY_INFO_TIME_RX |
 			       SURVEY_INFO_TIME_BSS_RX,
 		.token_size = MT7996_TOKEN_SIZE,
+		.rx_token_size = MT7996_RX_TOKEN_SIZE,
 		.tx_prepare_skb = mt7996_tx_prepare_skb,
 		.tx_complete_skb = mt76_connac_tx_complete_skb,
 		.rx_skb = mt7996_queue_rx_skb,
diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
index 43f20da4..836c7db7 100644
--- a/mt7996/mt7996.h
+++ b/mt7996/mt7996.h
@@ -39,6 +39,7 @@
 #define MT7996_EEPROM_SIZE		7680
 #define MT7996_EEPROM_BLOCK_SIZE	16
 #define MT7996_TOKEN_SIZE		16384
+#define MT7996_RX_TOKEN_SIZE		16384
 
 #define MT7996_CFEND_RATE_DEFAULT	0x49 /* OFDM 24M */
 #define MT7996_CFEND_RATE_11B		0x03 /* 11B LP, 11M */
@@ -63,6 +64,24 @@
 #define MT7996_SKU_RATE_NUM		417
 #define MT7996_SKU_PATH_NUM		494
 
+#define MT7996_RRO_MSDU_PG_HASH_SIZE	127
+#define MT7996_RRO_SESSION_MAX		1024
+#define MT7996_RRO_WIN_SIZE_MAX		1024
+#define MT7996_RRO_ADDR_ELEM_CR_CNT	128
+#define MT7996_RRO_BA_BITMAP_CR_CNT	2
+#define MT7996_RRO_SESSION_PER_CR	(MT7996_RRO_SESSION_MAX / \
+					 MT7996_RRO_ADDR_ELEM_CR_CNT)
+#define MT7996_BA_BITMAP_SZ_PER_SESSION	128
+#define MT7996_BA_BITMAP_SZ_PER_CR	((MT7996_RRO_SESSION_MAX * \
+					  MT7996_BA_BITMAP_SZ_PER_SESSION) / \
+					 MT7996_RRO_BA_BITMAP_CR_CNT)
+#define MT7996_SKB_TRUESIZE(x)		((x) + \
+					 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MT7996_RX_BUF_SIZE		MT7996_SKB_TRUESIZE(1800)
+#define MT7996_RX_MSDU_PAGE_SIZE	MT7996_SKB_TRUESIZE(128)
+
+#define MT7996_WED_RX_TOKEN_SIZE	32768
+
 struct mt7996_vif;
 struct mt7996_sta;
 struct mt7996_dfs_pulse;
@@ -102,6 +121,16 @@ enum mt7996_rxq_id {
 	MT7996_RXQ_BAND0 = 4,
 	MT7996_RXQ_BAND1 = 4,/* unused */
 	MT7996_RXQ_BAND2 = 5,
+	MT7996_RXQ_RRO_BAND0 = 8,
+	MT7996_RXQ_RRO_BAND1 = 8,/* unused */
+	MT7996_RXQ_RRO_BAND2 = 6,
+	MT7996_RXQ_MSDU_PG_BAND0 = 10,
+	MT7996_RXQ_MSDU_PG_BAND1 = 11,
+	MT7996_RXQ_MSDU_PG_BAND2 = 12,
+	MT7996_RXQ_TXFREE0 = 9,
+	MT7996_RXQ_TXFREE1 = 9,
+	MT7996_RXQ_TXFREE2 = 7,
+	MT7996_RXQ_RRO_IND = 0,
 };
 
 struct mt7996_twt_flow {
@@ -272,6 +301,31 @@ struct mt7996_air_monitor_ctrl {
 };
 #endif
 
+struct mt7996_rro_addr {
+	u32 head_pkt_l;
+	u32 head_pkt_h	: 4;
+	u32 seg_cnt	: 11;
+	u32 out_of_range: 1;
+	u32 rsv		: 8;
+	u32 signature	: 8;
+};
+
+struct mt7996_rro_cfg {
+	u32 ind_signature;
+	void *ba_bitmap_cache_va[MT7996_RRO_BA_BITMAP_CR_CNT];
+	void *addr_elem_alloc_va[MT7996_RRO_ADDR_ELEM_CR_CNT];
+	void *particular_session_va;
+	u32 particular_se_id;
+	dma_addr_t ba_bitmap_cache_pa[MT7996_RRO_BA_BITMAP_CR_CNT];
+	dma_addr_t addr_elem_alloc_pa[MT7996_RRO_ADDR_ELEM_CR_CNT];
+	dma_addr_t particular_session_pa;
+	u16 win_sz;
+
+	spinlock_t lock;
+	struct list_head pg_addr_cache;
+	struct list_head pg_hash_head[MT7996_RRO_MSDU_PG_HASH_SIZE];
+};
+
 struct mt7996_phy {
 	struct mt76_phy *mt76;
 	struct mt7996_dev *dev;
@@ -390,6 +444,9 @@ struct mt7996_dev {
 	bool flash_mode:1;
 	bool has_eht:1;
 
+	bool rro_support:1;
+	struct mt7996_rro_cfg rro;
+
 	bool testmode_enable;
 	bool bin_file_mode;
 	u8 eeprom_mode;
@@ -709,6 +766,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  struct ieee80211_sta *sta,
 			  struct mt76_tx_info *tx_info);
 void mt7996_tx_token_put(struct mt7996_dev *dev);
+int mt7996_dma_rro_init(struct mt7996_dev *dev);
 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 			 struct sk_buff *skb, u32 *info);
 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
diff --git a/mt7996/regs.h b/mt7996/regs.h
index 5ed7bcca..47fa965f 100644
--- a/mt7996/regs.h
+++ b/mt7996/regs.h
@@ -39,6 +39,40 @@ enum base_rev {
 
 #define __BASE(_id, _band)	(dev->reg.base[(_id)].band_base[(_band)])
 
+
+/* RRO TOP */
+#define MT_RRO_TOP_BASE				0xA000
+#define MT_RRO_TOP(ofs)				(MT_RRO_TOP_BASE + (ofs))
+
+#define MT_RRO_BA_BITMAP_BASE0			MT_RRO_TOP(0x8)
+#define MT_RRO_BA_BITMAP_BASE1			MT_RRO_TOP(0xC)
+#define WF_RRO_AXI_MST_CFG			MT_RRO_TOP(0xB8)
+#define WF_RRO_AXI_MST_CFG_DIDX_OK		BIT(12)
+#define MT_RRO_ADDR_ARRAY_BASE1			MT_RRO_TOP(0x34)
+#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE	BIT(31)
+
+#define MT_RRO_IND_CMD_SIGNATURE_BASE0		MT_RRO_TOP(0x38)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1		MT_RRO_TOP(0x3C)
+#define MT_RRO_IND_CMD_0_CTRL0			MT_RRO_TOP(0x40)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN	BIT(31)
+
+#define MT_RRO_PARTICULAR_CFG0			MT_RRO_TOP(0x5C)
+#define MT_RRO_PARTICULAR_CFG1			MT_RRO_TOP(0x60)
+#define MT_RRO_PARTICULAR_CONFG_EN		BIT(31)
+#define MT_RRO_PARTICULAR_SID			GENMASK(30, 16)
+
+#define MT_RRO_BA_BITMAP_BASE_EXT0		MT_RRO_TOP(0x70)
+#define MT_RRO_BA_BITMAP_BASE_EXT1		MT_RRO_TOP(0x74)
+#define MT_RRO_HOST_INT_ENA			MT_RRO_TOP(0x204)
+#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA	BIT(0)
+
+#define MT_RRO_ADDR_ELEM_SEG_ADDR0		MT_RRO_TOP(0x400)
+
+#define MT_RRO_ACK_SN_CTRL			MT_RRO_TOP(0x50)
+#define MT_RRO_ACK_SN_CTRL_SN_MASK		GENMASK(27, 16)
+#define MT_RRO_ACK_SN_CTRL_SESSION_MASK		GENMASK(11, 0)
+
+
 #define MT_MCU_INT_EVENT			0x2108
 #define MT_MCU_INT_EVENT_DMA_STOPPED		BIT(0)
 #define MT_MCU_INT_EVENT_DMA_INIT		BIT(1)
@@ -400,6 +434,7 @@ enum base_rev {
 #define MT_MCUQ_RING_BASE(q)		(MT_Q_BASE(q) + 0x300)
 #define MT_TXQ_RING_BASE(q)		(MT_Q_BASE(__TXQ(q)) + 0x300)
 #define MT_RXQ_RING_BASE(q)		(MT_Q_BASE(__RXQ(q)) + 0x500)
+#define MT_RXQ_RRO_IND_RING_BASE	MT_RRO_TOP(0x40)
 
 #define MT_MCUQ_EXT_CTRL(q)		(MT_Q_BASE(q) + 0x600 +	\
 					 MT_MCUQ_ID(q) * 0x4)
@@ -427,6 +462,15 @@ enum base_rev {
 #define MT_INT_MCU_CMD				BIT(29)
 #define MT_INT_RX_TXFREE_EXT			BIT(26)
 
+#define MT_INT_RX_DONE_RRO_BAND0		BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1		BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND2		BIT(14)
+#define MT_INT_RX_DONE_RRO_IND			BIT(11)
+#define MT_INT_RX_DONE_MSDU_PG_BAND0		BIT(18)
+#define MT_INT_RX_DONE_MSDU_PG_BAND1		BIT(19)
+#define MT_INT_RX_DONE_MSDU_PG_BAND2		BIT(23)
+
+
 #define MT_INT_RX(q)				(dev->q_int_mask[__RXQ(q)])
 #define MT_INT_TX_MCU(q)			(dev->q_int_mask[(q)])
 
@@ -434,20 +478,31 @@ enum base_rev {
 					 MT_INT_RX(MT_RXQ_MCU_WA))
 
 #define MT_INT_BAND0_RX_DONE		(MT_INT_RX(MT_RXQ_MAIN) |	\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |	\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
 
 #define MT_INT_BAND1_RX_DONE		(MT_INT_RX(MT_RXQ_BAND1) |	\
 					 MT_INT_RX(MT_RXQ_BAND1_WA) |	\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |	\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
 
 #define MT_INT_BAND2_RX_DONE		(MT_INT_RX(MT_RXQ_BAND2) |	\
 					 MT_INT_RX(MT_RXQ_BAND2_WA) |	\
-					 MT_INT_RX(MT_RXQ_MAIN_WA))
+					 MT_INT_RX(MT_RXQ_MAIN_WA) |	\
+					 MT_INT_RX(MT_RXQ_TXFREE_BAND0))
+
+#define MT_INT_RRO_RX_DONE		(MT_INT_RX(MT_RXQ_RRO_BAND0) |	\
+					 MT_INT_RX(MT_RXQ_RRO_BAND1) |	\
+					 MT_INT_RX(MT_RXQ_RRO_BAND2) |	\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) |	\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) |	\
+					 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
 
 #define MT_INT_RX_DONE_ALL		(MT_INT_RX_DONE_MCU |		\
 					 MT_INT_BAND0_RX_DONE |		\
					 MT_INT_BAND1_RX_DONE |		\
-					 MT_INT_BAND2_RX_DONE)
+					 MT_INT_BAND2_RX_DONE |		\
+					 MT_INT_RRO_RX_DONE)
 
 #define MT_INT_TX_DONE_FWDL		BIT(26)
 #define MT_INT_TX_DONE_MCU_WM		BIT(27)
-- 
2.18.0

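Note (illustration only, not part of the patch): the RRO queue flags
added to mt76.h pack a ring index (GENMASK(6, 5)) and a queue type
(GENMASK(8, 7)) next to the existing WED bits, with MT_QFLAG_RRO at
BIT(9). Below is a self-contained sketch of that encoding, with
FIELD_PREP/FIELD_GET open-coded as plain shifts so it builds outside
the kernel; mt_rro_q() is a hypothetical stand-in for __MT_RRO_Q().

#include <stdio.h>

#define MT_QFLAG_RRO_RING_SHIFT	5	/* GENMASK(6, 5) */
#define MT_QFLAG_RRO_TYPE_SHIFT	7	/* GENMASK(8, 7) */
#define MT_QFLAG_RRO		(1u << 9)

enum { MT76_RRO_Q_DATA, MT76_RRO_Q_MSDU_PG, MT76_RRO_Q_IND };

/* equivalent of __MT_RRO_Q(): OR the RRO marker bit with type and ring */
static unsigned int mt_rro_q(unsigned int type, unsigned int n)
{
	return MT_QFLAG_RRO |
	       (type << MT_QFLAG_RRO_TYPE_SHIFT) |
	       (n << MT_QFLAG_RRO_RING_SHIFT);
}

int main(void)
{
	unsigned int flags = mt_rro_q(MT76_RRO_Q_DATA, 1); /* MT_RRO_Q_DATA(1) */

	/* decode the fields the same way mt76_queue_is_rro_ind() does */
	printf("flags=0x%x ring=%u type=%u\n", flags,
	       (flags >> MT_QFLAG_RRO_RING_SHIFT) & 0x3,
	       (flags >> MT_QFLAG_RRO_TYPE_SHIFT) & 0x3);
	return 0;
}

Running it prints flags=0x220 ring=1 type=0, i.e. MT_RRO_Q_DATA(1): an
RRO data queue on ring 1, which is what mt7996_dma_init() assigns to
MT_RXQ_RRO_BAND2 in the diff above.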