From c9cbe5b9cc6e0d17352814aafe6514a6623bbd12 Mon Sep 17 00:00:00 2001
From: Bo Jiao <Bo.Jiao@mediatek.com>
Date: Mon, 6 Feb 2023 13:50:56 +0800
Subject: [PATCH 2004/2012] wifi: mt76: mt7996: wed: add wed3.0 rx support

Add hardware RRO support. This is a preliminary patch for WED 3.0 RX support.

Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Change-Id: I7e113b1392bcf085ec02c8a44ffbb7cf7c3fa027
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
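Note (not part of the commit message; kept above the diffstat so `git am` ignores it):
the hunks below introduce the RRO queue flags (MT_QFLAG_RRO, MT_QFLAG_RRO_TYPE,
MT_QFLAG_RRO_RING, MT_QFLAG_MAGIC), the mt76_queue_is_rro_ind() helper, and
mt7996_dma_rro_init(), which brings up the indication-command and MSDU-page rings.
The minimal sketch below only illustrates how those flags are meant to be consumed,
assuming the macros added in mt76.h and the ring constants added in mt7996/regs.h by
this patch; the function name is hypothetical and simply mirrors the pattern used in
mt7996_dma_rro_init().

	/* Hedged sketch: tag an RX ring as the RRO indication-command queue,
	 * bind it to WED RX ring 0, then allocate it, as mt7996_dma_rro_init()
	 * does for MT_RXQ_RRO_IND in the mt7996/dma.c hunk below.
	 */
	static int example_alloc_rro_ind_queue(struct mt7996_dev *dev)
	{
		struct mt76_queue *q = &dev->mt76.q_rx[MT_RXQ_RRO_IND];

		q->flags = MT_RRO_Q_IND | MT_WED_Q_RX(0);
		if (!mt76_queue_is_rro_ind(q))
			return -EINVAL;	/* unexpected flag layout */

		return mt76_queue_alloc(dev, q, MT_RXQ_ID(MT_RXQ_RRO_IND),
					MT7996_RX_RING_SIZE, 0,
					MT_RXQ_RRO_IND_RING_BASE);
	}
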
 dma.c           | 197 +++++++++++++++++++++++++++++++++++++-----------
 dma.h           |  12 +++
 mac80211.c      |   1 +
 mt76.h          |  63 ++++++++++++++--
 mt7996/dma.c    | 161 ++++++++++++++++++++++++++++++++++-----
 mt7996/init.c   | 130 ++++++++++++++++++++++++++++++--
 mt7996/mac.c    |  42 +++++++++--
 mt7996/mcu.c    |   8 +-
 mt7996/mmio.c   |  44 +++++++++--
 mt7996/mt7996.h |  58 ++++++++++++++
 mt7996/pci.c    |   3 +-
 mt7996/regs.h   |  69 ++++++++++++++++-
 12 files changed, 693 insertions(+), 95 deletions(-)

26diff --git a/dma.c b/dma.c
developerc2cfe0f2023-09-22 04:11:09 +080027index c2dbe6f6b..8097a3121 100644
developer064da3c2023-06-13 15:57:26 +080028--- a/dma.c
29+++ b/dma.c
developerc2cfe0f2023-09-22 04:11:09 +080030@@ -198,46 +198,65 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
developer064da3c2023-06-13 15:57:26 +080031 static void
32 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
33 {
34+ int ndesc = q->ndesc;
35+
36+ if (q->flags & MT_QFLAG_MAGIC)
37+ ndesc |= MT_DMA_MAGIC_EN;
38+
39 Q_WRITE(dev, q, desc_base, q->desc_dma);
40- Q_WRITE(dev, q, ring_size, q->ndesc);
41+ Q_WRITE(dev, q, ring_size, ndesc);
42 q->head = Q_READ(dev, q, dma_idx);
43 q->tail = q->head;
44 }
45
46 static void
47-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
48+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool skip)
49 {
50 int i;
51
52 if (!q || !q->ndesc)
53 return;
54
55+ if (!q->desc)
56+ goto done;
57+
58 /* clear descriptors */
59 for (i = 0; i < q->ndesc; i++)
60 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
61
62+ if (skip)
63+ goto sync;
64+
65+done:
66 Q_WRITE(dev, q, cpu_idx, 0);
67 Q_WRITE(dev, q, dma_idx, 0);
68+sync:
69 mt76_dma_sync_idx(dev, q);
70 }
71
72 static int
73 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
74- struct mt76_queue_buf *buf, void *data)
75+ struct mt76_queue_buf *buf, void *data,
76+ struct mt76_rxwi_cache *rxwi)
77 {
78- struct mt76_desc *desc = &q->desc[q->head];
79+ struct mt76_desc *desc;
80 struct mt76_queue_entry *entry = &q->entry[q->head];
81- struct mt76_rxwi_cache *rxwi = NULL;
82 u32 buf1 = 0, ctrl;
83 int idx = q->head;
84 int rx_token;
developer562703a2023-06-29 15:47:16 +080085
developerc2cfe0f2023-09-22 04:11:09 +080086+ if (mt76_queue_is_rro_ind(q))
87+ goto done;
88+
developer064da3c2023-06-13 15:57:26 +080089+ desc = &q->desc[q->head];
90 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
91
92 if (mt76_queue_is_wed_rx(q)) {
93- rxwi = mt76_get_rxwi(dev);
94- if (!rxwi)
95- return -ENOMEM;
96+ if (!rxwi) {
97+ rxwi = mt76_get_rxwi(dev);
98+ if (!rxwi)
99+ return -ENOMEM;
100+ }
101
102 rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
103 if (rx_token < 0) {
developerc2cfe0f2023-09-22 04:11:09 +0800104@@ -254,6 +273,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
developer064da3c2023-06-13 15:57:26 +0800105 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
106 WRITE_ONCE(desc->info, 0);
107
108+done:
109 entry->dma_addr[0] = buf->addr;
110 entry->dma_len[0] = buf->len;
111 entry->rxwi = rxwi;
developerc2cfe0f2023-09-22 04:11:09 +0800112@@ -398,14 +418,15 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
developer064da3c2023-06-13 15:57:26 +0800113
114 static void *
115 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
116- int *len, u32 *info, bool *more, bool *drop)
117+ int *len, u32 *info, bool *more, bool *drop, bool flush)
118 {
119 struct mt76_queue_entry *e = &q->entry[idx];
120 struct mt76_desc *desc = &q->desc[idx];
developerc2cfe0f2023-09-22 04:11:09 +0800121 void *buf;
developer064da3c2023-06-13 15:57:26 +0800122+ u32 ctrl;
123
124+ ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
125 if (len) {
126- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
127 *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
128 *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
129 }
developerc2cfe0f2023-09-22 04:11:09 +0800130@@ -413,6 +434,12 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer064da3c2023-06-13 15:57:26 +0800131 if (info)
132 *info = le32_to_cpu(desc->info);
133
134+ if (drop) {
135+ *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
136+ if (ctrl & MT_DMA_CTL_VER_MASK)
137+ *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
138+ }
139+
140 if (mt76_queue_is_wed_rx(q)) {
141 u32 buf1 = le32_to_cpu(desc->buf1);
142 u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
developerc2cfe0f2023-09-22 04:11:09 +0800143@@ -425,20 +452,46 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
developer064da3c2023-06-13 15:57:26 +0800144 SKB_WITH_OVERHEAD(q->buf_size),
145 DMA_FROM_DEVICE);
146
147- buf = r->ptr;
148- r->dma_addr = 0;
149- r->ptr = NULL;
150-
151- mt76_put_rxwi(dev, r);
152-
153- if (drop) {
154- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
155-
156- *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
157- MT_DMA_CTL_DROP));
158+ if (flush) {
159+ buf = r->ptr;
160+ r->dma_addr = 0;
161+ r->ptr = NULL;
162+
163+ mt76_put_rxwi(dev, r);
164+ } else {
165+ struct mt76_queue_buf qbuf;
166+
167+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
168+ if (!buf)
169+ return NULL;
170+
171+ memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
172+
173+ r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
174+ SKB_WITH_OVERHEAD(q->buf_size),
175+ DMA_FROM_DEVICE);
176+ if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
177+ skb_free_frag(r->ptr);
178+ mt76_put_rxwi(dev, r);
179+ return NULL;
180+ }
181+
182+ qbuf.addr = r->dma_addr;
183+ qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
184+ qbuf.skip_unmap = false;
185+
186+ if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
187+ dma_unmap_single(dev->dma_dev, r->dma_addr,
188+ SKB_WITH_OVERHEAD(q->buf_size),
189+ DMA_FROM_DEVICE);
190+ skb_free_frag(r->ptr);
191+ mt76_put_rxwi(dev, r);
192+ return NULL;
193+ }
194+ }
195
196+ if (drop)
197 *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
198- }
199 } else {
developerc2cfe0f2023-09-22 04:11:09 +0800200 buf = e->buf;
201 e->buf = NULL;
202@@ -460,15 +513,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
developer064da3c2023-06-13 15:57:26 +0800203 if (!q->queued)
204 return NULL;
205
206- if (flush)
207- q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
208- else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
developerc2cfe0f2023-09-22 04:11:09 +0800209- return NULL;
210+ if (q->flags & MT_QFLAG_RRO) {
developer064da3c2023-06-13 15:57:26 +0800211+ goto done;
212+ } else {
213+ if (flush)
214+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
215+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
216+ return NULL;
217+ }
218
219+done:
220 q->tail = (q->tail + 1) % q->ndesc;
221 q->queued--;
222
223- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
224+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
225 }
226
227 static int
developerc2cfe0f2023-09-22 04:11:09 +0800228@@ -617,7 +675,10 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
developer064da3c2023-06-13 15:57:26 +0800229
230 while (q->queued < q->ndesc - 1) {
231 struct mt76_queue_buf qbuf;
232- void *buf;
233+ void *buf = NULL;
234+
235+ if (mt76_queue_is_rro_ind(q))
236+ goto done;
237
238 buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
239 if (!buf)
developerc2cfe0f2023-09-22 04:11:09 +0800240@@ -629,10 +690,11 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
developer064da3c2023-06-13 15:57:26 +0800241 break;
242 }
243
244+done:
245 qbuf.addr = addr + offset;
246 qbuf.len = len - offset;
247 qbuf.skip_unmap = false;
248- if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
249+ if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
250 dma_unmap_single(dev->dma_dev, addr, len,
251 DMA_FROM_DEVICE);
252 skb_free_frag(buf);
developerc2cfe0f2023-09-22 04:11:09 +0800253@@ -641,7 +703,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
developer064da3c2023-06-13 15:57:26 +0800254 frames++;
255 }
256
257- if (frames)
258+ if (frames || mt76_queue_is_wed_rx(q))
259 mt76_dma_kick_queue(dev, q);
260
261 spin_unlock_bh(&q->lock);
developerc2cfe0f2023-09-22 04:11:09 +0800262@@ -654,7 +716,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
developer064da3c2023-06-13 15:57:26 +0800263 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
264 struct mtk_wed_device *wed = &dev->mmio.wed;
265 int ret, type, ring;
266- u8 flags;
267+ u16 flags;
268
269 if (!q || !q->ndesc)
270 return -EINVAL;
developerc2cfe0f2023-09-22 04:11:09 +0800271@@ -681,7 +743,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
developer064da3c2023-06-13 15:57:26 +0800272 case MT76_WED_Q_TXFREE:
273 /* WED txfree queue needs ring to be initialized before setup */
274 q->flags = 0;
275- mt76_dma_queue_reset(dev, q);
276+ mt76_dma_queue_reset(dev, q, false);
277 mt76_dma_rx_fill(dev, q);
278 q->flags = flags;
279
developerc2cfe0f2023-09-22 04:11:09 +0800280@@ -690,9 +752,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
developer064da3c2023-06-13 15:57:26 +0800281 q->wed_regs = wed->txfree_ring.reg_base;
282 break;
283 case MT76_WED_Q_RX:
284- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
285- if (!ret)
286- q->wed_regs = wed->rx_ring[ring].reg_base;
287+ if (q->flags & MT_QFLAG_RRO) {
288+ q->flags &= ~0x1f;
289+
290+ ring = FIELD_GET(MT_QFLAG_RRO_RING, q->flags);
291+ type = FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags);
292+ if (type == MT76_RRO_Q_DATA) {
293+ mt76_dma_queue_reset(dev, q, true);
294+ ret = mtk_wed_device_rro_rx_ring_setup(wed, ring, q->regs);
295+ } else if (type == MT76_RRO_Q_MSDU_PG) {
296+ mt76_dma_queue_reset(dev, q, true);
297+ ret = mtk_wed_device_msdu_pg_rx_ring_setup(wed, ring, q->regs);
298+ } else if (type == MT76_RRO_Q_IND) {
299+ mt76_dma_queue_reset(dev, q, false);
300+ mt76_dma_rx_fill(dev, q);
301+ ret = mtk_wed_device_ind_rx_ring_setup(wed, q->regs);
302+ }
303+ if (type != MT76_RRO_Q_IND) {
304+ q->head = q->ndesc - 1;
305+ q->queued = q->ndesc - 1;
306+ }
307+ } else {
308+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, 0);
309+ if (!ret)
310+ q->wed_regs = wed->rx_ring[ring].reg_base;
311+ }
312 break;
313 default:
314 ret = -EINVAL;
developerc2cfe0f2023-09-22 04:11:09 +0800315@@ -721,10 +805,25 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developer064da3c2023-06-13 15:57:26 +0800316 q->hw_idx = idx;
317
318 size = q->ndesc * sizeof(struct mt76_desc);
319+ if (mt76_queue_is_rro_ind(q))
320+ size = q->ndesc * sizeof(struct mt76_rro_desc);
321+
322 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
323 if (!q->desc)
324 return -ENOMEM;
325
326+ if (mt76_queue_is_rro_ind(q)) {
327+ struct mt76_rro_ind *cmd;
328+ int i;
329+
330+ q->rro_desc = (struct mt76_rro_desc *)(q->desc);
331+ q->desc = NULL;
332+ for (i = 0; i < q->ndesc; i++) {
333+ cmd = (struct mt76_rro_ind *) &q->rro_desc[i];
334+ cmd->magic_cnt = MT_DMA_IND_CMD_MAGIC_CNT - 1;
335+ }
336+ }
337+
338 size = q->ndesc * sizeof(*q->entry);
339 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
340 if (!q->entry)
developerc2cfe0f2023-09-22 04:11:09 +0800341@@ -734,8 +833,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
developer064da3c2023-06-13 15:57:26 +0800342 if (ret)
343 return ret;
344
345- if (!mt76_queue_is_txfree(q))
346- mt76_dma_queue_reset(dev, q);
347+ if (!mtk_wed_device_active(&dev->mmio.wed) ||
348+ (!mt76_queue_is_wed_txfree(q) &&
349+ !(mtk_wed_get_rx_capa(&dev->mmio.wed) &&
350+ q->flags & MT_QFLAG_RRO)))
351+ mt76_dma_queue_reset(dev, q, false);
352
353 return 0;
354 }
developerc2cfe0f2023-09-22 04:11:09 +0800355@@ -753,13 +855,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
356 spin_lock_bh(&q->lock);
357
358 do {
359+ if (q->flags & MT_QFLAG_RRO)
360+ break;
361+
362 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
363 if (!buf)
364 break;
365
366- if (q->flags & MT_QFLAG_RRO)
367- continue;
368-
369 skb_free_frag(buf);
370 } while (1);
371
372@@ -770,8 +872,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
developer064da3c2023-06-13 15:57:26 +0800373
374 spin_unlock_bh(&q->lock);
375
376- if (((q->flags & MT_QFLAG_WED) &&
377- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) ||
378+ if (mt76_queue_is_wed_rx(q) ||
379 (q->flags & MT_QFLAG_RRO))
380 return;
381
developerc2cfe0f2023-09-22 04:11:09 +0800382@@ -792,9 +893,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
developer064da3c2023-06-13 15:57:26 +0800383 if (!q->ndesc)
384 return;
385
386+ if (!q->desc)
387+ goto done;
388+
389 for (i = 0; i < q->ndesc; i++)
390 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
391
392+done:
393 mt76_dma_rx_cleanup(dev, q);
394
395 /* reset WED rx queues */
developerc2cfe0f2023-09-22 04:11:09 +0800396@@ -841,8 +946,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
developer064da3c2023-06-13 15:57:26 +0800397 bool check_ddone = false;
398 bool more;
399
400- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
401- q->flags == MT_WED_Q_TXFREE) {
402+ if ((IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
403+ q->flags == MT_WED_Q_TXFREE)) {
404 dma_idx = Q_READ(dev, q, dma_idx);
405 check_ddone = true;
406 }
developerc2cfe0f2023-09-22 04:11:09 +0800407@@ -1004,7 +1109,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800408 mt76_for_each_q_rx(dev, i) {
409 struct mt76_queue *q = &dev->q_rx[i];
410
411- if (mt76_queue_is_wed_rx(q))
412+ if (mtk_wed_device_active(&dev->mmio.wed) &&
413+ (q->flags & MT_QFLAG_RRO))
414 continue;
415
416 netif_napi_del(&dev->napi[i]);
developerc2cfe0f2023-09-22 04:11:09 +0800417@@ -1016,6 +1122,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800418
419 if (mtk_wed_device_active(&dev->mmio.wed_ext))
420 mtk_wed_device_detach(&dev->mmio.wed_ext);
421+
422 mt76_free_pending_txwi(dev);
423 mt76_free_pending_rxwi(dev);
424 }
425diff --git a/dma.h b/dma.h
developerc2cfe0f2023-09-22 04:11:09 +0800426index 1b090d78c..480370928 100644
developer064da3c2023-06-13 15:57:26 +0800427--- a/dma.h
428+++ b/dma.h
429@@ -25,6 +25,13 @@
430 #define MT_DMA_PPE_ENTRY GENMASK(30, 16)
431 #define MT_DMA_INFO_PPE_VLD BIT(31)
432
433+#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
434+#define MT_DMA_CTL_VER_MASK BIT(7)
435+
436+#define MT_DMA_MAGIC_EN BIT(13)
437+
438+#define MT_DMA_IND_CMD_MAGIC_CNT 8
439+
440 #define MT_DMA_HDR_LEN 4
441 #define MT_RX_INFO_LEN 4
442 #define MT_FCE_INFO_LEN 4
443@@ -37,6 +44,11 @@ struct mt76_desc {
444 __le32 info;
445 } __packed __aligned(4);
446
447+struct mt76_rro_desc {
448+ __le32 buf0;
449+ __le32 buf1;
450+} __packed __aligned(4);
451+
452 enum mt76_qsel {
453 MT_QSEL_MGMT,
454 MT_QSEL_HCCA,
455diff --git a/mac80211.c b/mac80211.c
developerc2cfe0f2023-09-22 04:11:09 +0800456index 7cd9b6fc7..3070321d5 100644
developer064da3c2023-06-13 15:57:26 +0800457--- a/mac80211.c
458+++ b/mac80211.c
developerc2cfe0f2023-09-22 04:11:09 +0800459@@ -735,6 +735,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
developer064da3c2023-06-13 15:57:26 +0800460 return;
461 }
462 }
463+
464 __skb_queue_tail(&dev->rx_skb[q], skb);
465 }
466
467diff --git a/mt76.h b/mt76.h
developerc2cfe0f2023-09-22 04:11:09 +0800468index 3b2a658db..3954d01c5 100644
developer064da3c2023-06-13 15:57:26 +0800469--- a/mt76.h
470+++ b/mt76.h
471@@ -48,6 +48,18 @@
472
473 #define MT76_TOKEN_FREE_THR 64
474
475+#define MT_QFLAG_RRO_RING GENMASK(6, 5)
476+#define MT_QFLAG_RRO_TYPE GENMASK(8, 7)
477+#define MT_QFLAG_RRO BIT(9)
478+#define MT_QFLAG_MAGIC BIT(10)
479+
480+#define __MT_RRO_Q(_type, _n) (MT_QFLAG_RRO | \
481+ FIELD_PREP(MT_QFLAG_RRO_TYPE, _type) | \
482+ FIELD_PREP(MT_QFLAG_RRO_RING, _n))
483+#define MT_RRO_Q_DATA(_n) __MT_RRO_Q(MT76_RRO_Q_DATA, _n)
484+#define MT_RRO_Q_MSDU_PG(_n) __MT_RRO_Q(MT76_RRO_Q_MSDU_PG, _n)
485+#define MT_RRO_Q_IND __MT_RRO_Q(MT76_RRO_Q_IND, 0)
486+
487 #define MT_QFLAG_WED_RING GENMASK(1, 0)
488 #define MT_QFLAG_WED_TYPE GENMASK(3, 2)
489 #define MT_QFLAG_WED BIT(4)
490@@ -82,6 +94,12 @@ enum mt76_wed_type {
491 MT76_WED_Q_RX,
492 };
493
494+enum mt76_RRO_type {
495+ MT76_RRO_Q_DATA,
496+ MT76_RRO_Q_MSDU_PG,
497+ MT76_RRO_Q_IND,
498+};
499+
500 struct mt76_bus_ops {
501 u32 (*rr)(struct mt76_dev *dev, u32 offset);
502 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
503@@ -128,6 +146,16 @@ enum mt76_rxq_id {
504 MT_RXQ_MAIN_WA,
505 MT_RXQ_BAND2,
506 MT_RXQ_BAND2_WA,
507+ MT_RXQ_RRO_BAND0,
508+ MT_RXQ_RRO_BAND1,
509+ MT_RXQ_RRO_BAND2,
510+ MT_RXQ_MSDU_PAGE_BAND0,
511+ MT_RXQ_MSDU_PAGE_BAND1,
512+ MT_RXQ_MSDU_PAGE_BAND2,
513+ MT_RXQ_TXFREE_BAND0,
514+ MT_RXQ_TXFREE_BAND1,
515+ MT_RXQ_TXFREE_BAND2,
516+ MT_RXQ_RRO_IND,
517 __MT_RXQ_MAX
518 };
519
520@@ -206,6 +234,7 @@ struct mt76_queue {
521 spinlock_t lock;
522 spinlock_t cleanup_lock;
523 struct mt76_queue_entry *entry;
524+ struct mt76_rro_desc *rro_desc;
525 struct mt76_desc *desc;
526
527 u16 first;
528@@ -219,8 +248,8 @@ struct mt76_queue {
529
530 u8 buf_offset;
531 u8 hw_idx;
532- u8 flags;
533-
534+ u8 magic_cnt;
535+ u32 flags;
536 u32 wed_regs;
537
538 dma_addr_t desc_dma;
539@@ -274,7 +303,7 @@ struct mt76_queue_ops {
540
541 void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
542
543- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
544+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q, bool skip);
545 };
546
547 enum mt76_phy_type {
developerc2cfe0f2023-09-22 04:11:09 +0800548@@ -375,6 +404,17 @@ struct mt76_txq {
developer064da3c2023-06-13 15:57:26 +0800549 bool aggr;
550 };
551
552+struct mt76_rro_ind {
553+ u32 se_id : 12;
554+ u32 rsv : 4;
555+ u32 start_sn : 12;
556+ u32 ind_reason : 4;
557+ u32 ind_cnt : 13;
558+ u32 win_sz : 3;
559+ u32 rsv2 : 13;
560+ u32 magic_cnt : 3;
561+};
562+
563 struct mt76_txwi_cache {
564 struct list_head list;
565 dma_addr_t dma_addr;
developerc2cfe0f2023-09-22 04:11:09 +0800566@@ -1629,12 +1669,19 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
developer064da3c2023-06-13 15:57:26 +0800567 return (q->flags & MT_QFLAG_WED) &&
568 FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
569 }
570-static inline bool mt76_queue_is_txfree(struct mt76_queue *q)
571+
572+static inline bool mt76_queue_is_wed_txfree(struct mt76_queue *q)
573 {
574 return (q->flags & MT_QFLAG_WED) &&
575 FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
576 }
577
578+static inline bool mt76_queue_is_rro_ind(struct mt76_queue *q)
579+{
580+ return (q->flags & MT_QFLAG_RRO) &&
581+ FIELD_GET(MT_QFLAG_RRO_TYPE, q->flags) == MT76_RRO_Q_IND;
582+}
583+
584 struct mt76_txwi_cache *
585 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
586 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
developerc2cfe0f2023-09-22 04:11:09 +0800587@@ -1653,10 +1700,14 @@ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
developer064da3c2023-06-13 15:57:26 +0800588 static inline int
589 mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
590 {
591- int token;
592+ int token, start = 0;
593+
594+ if (mtk_wed_device_active(&dev->mmio.wed))
595+ start = dev->mmio.wed.wlan.nbuf;
596
597 spin_lock_bh(&dev->token_lock);
598- token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
599+ token = idr_alloc(&dev->token, *ptxwi, start, start + dev->token_size,
600+ GFP_ATOMIC);
601 spin_unlock_bh(&dev->token_lock);
602
603 return token;
604diff --git a/mt7996/dma.c b/mt7996/dma.c
developerc2cfe0f2023-09-22 04:11:09 +0800605index 3c8f617e0..309cc242e 100644
developer064da3c2023-06-13 15:57:26 +0800606--- a/mt7996/dma.c
607+++ b/mt7996/dma.c
608@@ -64,6 +64,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
609 RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
610 RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
611
612+ if (dev->rro_support) {
613+ /* band0 */
614+ RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
615+ MT7996_RXQ_RRO_BAND0);
616+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
617+ MT7996_RXQ_MSDU_PG_BAND0);
618+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
619+ MT7996_RXQ_TXFREE0);
620+ /* band1 */
621+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
622+ MT7996_RXQ_MSDU_PG_BAND1);
623+ /* band2 */
624+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
625+ MT7996_RXQ_RRO_BAND2);
626+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
627+ MT7996_RXQ_MSDU_PG_BAND2);
628+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
629+ MT7996_RXQ_TXFREE2);
630+
631+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
632+ MT7996_RXQ_RRO_IND);
633+ }
634+
635 /* data tx queue */
636 TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
637 TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
developer5579e462023-06-28 11:14:11 +0800638@@ -102,6 +125,22 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
639 mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x2));
640 mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
641 mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x10));
developer064da3c2023-06-13 15:57:26 +0800642+ if (dev->rro_support) {
643+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
developer5579e462023-06-28 11:14:11 +0800644+ PREFETCH(0x10));
developer064da3c2023-06-13 15:57:26 +0800645+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
developer5579e462023-06-28 11:14:11 +0800646+ PREFETCH(0x10));
developer064da3c2023-06-13 15:57:26 +0800647+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
developer5579e462023-06-28 11:14:11 +0800648+ PREFETCH(0x4));
developer064da3c2023-06-13 15:57:26 +0800649+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
developer5579e462023-06-28 11:14:11 +0800650+ PREFETCH(0x4));
developer064da3c2023-06-13 15:57:26 +0800651+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
developer5579e462023-06-28 11:14:11 +0800652+ PREFETCH(0x4));
developer064da3c2023-06-13 15:57:26 +0800653+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
developer5579e462023-06-28 11:14:11 +0800654+ PREFETCH(0x4));
developer064da3c2023-06-13 15:57:26 +0800655+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
developer5579e462023-06-28 11:14:11 +0800656+ PREFETCH(0x4));
developer064da3c2023-06-13 15:57:26 +0800657+ }
developer5579e462023-06-28 11:14:11 +0800658 #undef PREFETCH
developer064da3c2023-06-13 15:57:26 +0800659
660 mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
developer5579e462023-06-28 11:14:11 +0800661@@ -161,6 +200,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
developer064da3c2023-06-13 15:57:26 +0800662
developerc2cfe0f2023-09-22 04:11:09 +0800663 void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
developer064da3c2023-06-13 15:57:26 +0800664 {
665+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
666 u32 hif1_ofs = 0;
667 u32 irq_mask;
668
developerc2cfe0f2023-09-22 04:11:09 +0800669@@ -169,11 +209,16 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
developer064da3c2023-06-13 15:57:26 +0800670
developerc2cfe0f2023-09-22 04:11:09 +0800671 /* enable WFDMA Tx/Rx */
developer064da3c2023-06-13 15:57:26 +0800672 if (!reset) {
673- mt76_set(dev, MT_WFDMA0_GLO_CFG,
674- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
675- MT_WFDMA0_GLO_CFG_RX_DMA_EN |
676- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
677- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
678+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
679+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
680+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
681+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO);
682+ else
683+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
684+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
685+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
686+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
687+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
688
689 if (dev->hif2)
690 mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
developerc2cfe0f2023-09-22 04:11:09 +0800691@@ -195,14 +240,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
developer064da3c2023-06-13 15:57:26 +0800692 if (mt7996_band_valid(dev, MT_BAND2))
693 irq_mask |= MT_INT_BAND2_RX_DONE;
694
695- if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
696+ if (mtk_wed_device_active(wed) && wed_reset) {
697 u32 wed_irq_mask = irq_mask;
698
699 wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
700
701 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
702
703- mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
704+ mtk_wed_device_start(wed, wed_irq_mask);
705 }
706
707 irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
developerc2cfe0f2023-09-22 04:11:09 +0800708@@ -296,7 +341,8 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
developer064da3c2023-06-13 15:57:26 +0800709 /* fix hardware limitation, pcie1's rx ring3 is not available
710 * so, redirect pcie0 rx ring3 interrupt to pcie1
711 */
712- if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->rro_support)
713+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
714+ dev->rro_support)
715 mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
716 MT_WFDMA0_RX_INT_SEL_RING6);
717 else
developerc2cfe0f2023-09-22 04:11:09 +0800718@@ -307,6 +353,78 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
719 mt7996_dma_start(dev, reset, true);
developer064da3c2023-06-13 15:57:26 +0800720 }
721
722+int mt7996_dma_rro_init(struct mt7996_dev *dev)
723+{
724+ int ret;
725+ u32 hif1_ofs = 0;
726+ u32 wed_irq_mask;
727+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
728+
729+ if (dev->hif2)
730+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
731+
732+ /* ind cmd */
733+ dev->mt76.q_rx[MT_RXQ_RRO_IND].flags = MT_RRO_Q_IND | MT_WED_Q_RX(0);
734+ dev->mt76.q_rx[MT_RXQ_RRO_IND].flags |= MT_WED_Q_RX(0);
735+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_IND],
736+ MT_RXQ_ID(MT_RXQ_RRO_IND),
737+ MT7996_RX_RING_SIZE,
738+ 0, MT_RXQ_RRO_IND_RING_BASE);
739+ if (ret)
740+ return ret;
741+
742+ /* rx msdu page queue for band0 */
743+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags = MT_RRO_Q_MSDU_PG(0);
744+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_QFLAG_MAGIC;
745+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags |= MT_WED_Q_RX(0);
746+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND0],
747+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
748+ MT7996_RX_RING_SIZE,
749+ MT7996_RX_MSDU_PAGE_SIZE,
750+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
751+ if (ret)
752+ return ret;
753+
754+ if (mt7996_band_valid(dev, MT_BAND1)) {
755+ /* rx msdu page queue for band1 */
756+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags = MT_RRO_Q_MSDU_PG(1);
757+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_QFLAG_MAGIC;
758+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags |= MT_WED_Q_RX(1);
759+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND1],
760+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
761+ MT7996_RX_RING_SIZE,
762+ MT7996_RX_MSDU_PAGE_SIZE,
763+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
764+ if (ret)
765+ return ret;
766+ }
767+
768+ if (mt7996_band_valid(dev, MT_BAND2)) {
769+ /* rx msdu page queue for band2 */
770+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags = MT_RRO_Q_MSDU_PG(2);
771+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_QFLAG_MAGIC;
772+ dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags |= MT_WED_Q_RX(0);
773+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MSDU_PAGE_BAND2],
774+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
775+ MT7996_RX_RING_SIZE,
776+ MT7996_RX_MSDU_PAGE_SIZE,
777+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
778+ if (ret)
779+ return ret;
780+ }
781+
782+ wed_irq_mask = dev->mt76.mmio.irqmask |
783+ MT_INT_RRO_RX_DONE |
784+ MT_INT_TX_DONE_BAND2;
785+
786+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
787+
788+ mtk_wed_device_start_hwrro(wed, wed_irq_mask, false);
789+ mt7996_irq_enable(dev, wed_irq_mask);
790+
791+ return 0;
792+}
793+
794 int mt7996_dma_init(struct mt7996_dev *dev)
795 {
796 struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
developerc2cfe0f2023-09-22 04:11:09 +0800797@@ -376,6 +494,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800798 return ret;
799
800 /* rx data queue for band0 and band1 */
801+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
802+ dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
803+
804 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
805 MT_RXQ_ID(MT_RXQ_MAIN),
806 MT7996_RX_RING_SIZE,
developerc2cfe0f2023-09-22 04:11:09 +0800807@@ -399,8 +520,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800808 if (mt7996_band_valid(dev, MT_BAND2)) {
809 /* rx data queue for band2 */
810 rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
811- if (mtk_wed_device_active(wed))
812- rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
developerc2cfe0f2023-09-22 04:11:09 +0800813+ if (mtk_wed_device_active(wed_ext) && mtk_wed_get_rx_capa(wed_ext))
814+ dev->mt76.q_rx[MT_RXQ_BAND2].flags = MT_WED_Q_RX(0) |
815+ MT_QFLAG_WED_EXT;
816
developer064da3c2023-06-13 15:57:26 +0800817 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
818 MT_RXQ_ID(MT_RXQ_BAND2),
developerc2cfe0f2023-09-22 04:11:09 +0800819@@ -425,11 +547,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800820 return ret;
821 }
822
823-
824- if (dev->rro_support) {
825+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
826+ dev->rro_support) {
827 /* rx rro data queue for band0 */
828 dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags = MT_RRO_Q_DATA(0);
829 dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_QFLAG_MAGIC;
830+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags |= MT_WED_Q_RX(0);
831 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
832 MT_RXQ_ID(MT_RXQ_RRO_BAND0),
833 MT7996_RX_RING_SIZE,
developerc2cfe0f2023-09-22 04:11:09 +0800834@@ -439,8 +562,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800835 return ret;
836
837 /* tx free notify event from WA for band0 */
838- if (mtk_wed_device_active(wed))
839- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
840+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
841 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
842 MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
843 MT7996_RX_MCU_RING_SIZE,
developerc2cfe0f2023-09-22 04:11:09 +0800844@@ -453,6 +575,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800845 /* rx rro data queue for band2 */
846 dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags = MT_RRO_Q_DATA(1);
847 dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_QFLAG_MAGIC;
848+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags |= MT_WED_Q_RX(1);
849 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
850 MT_RXQ_ID(MT_RXQ_RRO_BAND2),
851 MT7996_RX_RING_SIZE,
developerc2cfe0f2023-09-22 04:11:09 +0800852@@ -530,18 +653,18 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
developer064da3c2023-06-13 15:57:26 +0800853
854 /* reset hw queues */
855 for (i = 0; i < __MT_TXQ_MAX; i++) {
856- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
857+ mt76_queue_reset(dev, dev->mphy.q_tx[i], false);
858 if (phy2)
859- mt76_queue_reset(dev, phy2->q_tx[i]);
860+ mt76_queue_reset(dev, phy2->q_tx[i], false);
861 if (phy3)
862- mt76_queue_reset(dev, phy3->q_tx[i]);
863+ mt76_queue_reset(dev, phy3->q_tx[i], false);
864 }
865
866 for (i = 0; i < __MT_MCUQ_MAX; i++)
867- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
868+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], false);
869
870 mt76_for_each_q_rx(&dev->mt76, i) {
871- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
872+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], false);
873 }
874
875 mt76_tx_status_check(&dev->mt76, true);
876diff --git a/mt7996/init.c b/mt7996/init.c
developerc2cfe0f2023-09-22 04:11:09 +0800877index f2d43d3dc..3a749475e 100644
developer064da3c2023-06-13 15:57:26 +0800878--- a/mt7996/init.c
879+++ b/mt7996/init.c
developerc2cfe0f2023-09-22 04:11:09 +0800880@@ -502,8 +502,13 @@ void mt7996_mac_init(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800881
882 /* rro module init */
883 mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
884- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
885- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
886+ if (dev->rro_support) {
887+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
888+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
889+ } else {
890+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
891+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
892+ }
893
894 mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
895 MCU_WA_PARAM_HW_PATH_HIF_VER,
developerc2cfe0f2023-09-22 04:11:09 +0800896@@ -656,6 +661,114 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +0800897 msleep(20);
898 }
899
900+static int mt7996_rro_init(struct mt7996_dev *dev)
901+{
902+ struct mt7996_rro_addr *ptr;
903+ struct mt7996_rro_cfg *rro = &dev->rro;
904+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
905+ u32 size, val = 0, reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
906+ int i, j;
907+ void *buf;
908+
909+ for (i = 0; i < MT7996_RRO_BA_BITMAP_CR_CNT; i++) {
910+ buf = dmam_alloc_coherent(dev->mt76.dma_dev,
911+ MT7996_BA_BITMAP_SZ_PER_CR,
912+ &rro->ba_bitmap_cache_pa[i],
913+ GFP_KERNEL);
914+ if (!buf)
915+ return -ENOMEM;
916+
917+ rro->ba_bitmap_cache_va[i] = buf;
918+ }
919+
920+ rro->win_sz = MT7996_RRO_WIN_SIZE_MAX;
921+ for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
922+ size = MT7996_RRO_SESSION_PER_CR *
923+ rro->win_sz * sizeof(struct mt7996_rro_addr);
924+
925+ buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
926+ &rro->addr_elem_alloc_pa[i],
927+ GFP_KERNEL);
928+ if (!buf)
929+ return -ENOMEM;
930+ rro->addr_elem_alloc_va[i] = buf;
931+
932+ memset(rro->addr_elem_alloc_va[i], 0, size);
933+
934+ ptr = rro->addr_elem_alloc_va[i];
935+ for (j = 0; j < MT7996_RRO_SESSION_PER_CR * rro->win_sz; j++, ptr++)
936+ ptr->signature = 0xff;
937+
938+ wed->wlan.ind_cmd.addr_elem_phys[i] = rro->addr_elem_alloc_pa[i];
939+ }
940+
941+ rro->particular_se_id = MT7996_RRO_SESSION_MAX;
942+ size = rro->win_sz * sizeof(struct mt7996_rro_addr);
943+ buf = dmam_alloc_coherent(dev->mt76.dma_dev, size,
944+ &rro->particular_session_pa,
945+ GFP_KERNEL);
946+ if (!buf)
947+ return -ENOMEM;
948+
949+ rro->particular_session_va = buf;
950+ ptr = rro->particular_session_va;
951+ for (j = 0; j < rro->win_sz; j++, ptr++)
952+ ptr->signature = 0xff;
953+
954+ INIT_LIST_HEAD(&rro->pg_addr_cache);
955+ for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
956+ INIT_LIST_HEAD(&rro->pg_hash_head[i]);
957+
958+ /* rro hw init */
959+ /* TODO: remove line after WM has set */
960+ mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
961+
962+ /* setup BA bitmap cache address */
963+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
964+ rro->ba_bitmap_cache_pa[0]);
965+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
966+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
967+ rro->ba_bitmap_cache_pa[1]);
968+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
969+
970+ /* setup Address element address */
971+ for (i = 0; i < MT7996_RRO_ADDR_ELEM_CR_CNT; i++) {
972+ mt76_wr(dev, reg, rro->addr_elem_alloc_pa[i] >> 4);
973+ reg += 4;
974+ }
975+
976+ /* setup Address element address - separate address segment mode */
977+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
978+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
979+
980+ wed->wlan.ind_cmd.win_size = ffs(rro->win_sz) - 6;
981+ wed->wlan.ind_cmd.particular_sid = rro->particular_se_id;
982+ wed->wlan.ind_cmd.particular_se_phys = rro->particular_session_pa;
983+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_CR_CNT;
984+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
985+
986+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
987+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
988+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
989+
990+ /* particular session configure */
991+ /* use max session idx + 1 as particular session id */
992+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0,
993+ rro->particular_session_pa);
994+
995+ val = FIELD_PREP(MT_RRO_PARTICULAR_SID,
996+ MT7996_RRO_SESSION_MAX);
997+ val |= MT_RRO_PARTICULAR_CONFG_EN;
998+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1, val);
999+
1000+ /* interrupt enable */
1001+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
1002+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
1003+
1004+ /* rro ind cmd queue init */
1005+ return mt7996_dma_rro_init(dev);
1006+}
1007+
1008 static int mt7996_init_hardware(struct mt7996_dev *dev)
1009 {
1010 int ret, idx;
developerc2cfe0f2023-09-22 04:11:09 +08001011@@ -687,6 +800,13 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
developer064da3c2023-06-13 15:57:26 +08001012 if (ret)
1013 return ret;
1014
1015+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
1016+ dev->rro_support) {
1017+ ret = mt7996_rro_init(dev);
1018+ if (ret)
1019+ return ret;
1020+ }
1021+
1022 ret = mt7996_eeprom_init(dev);
1023 if (ret < 0)
1024 return ret;
developerc2cfe0f2023-09-22 04:11:09 +08001025@@ -1131,10 +1251,10 @@ int mt7996_register_device(struct mt7996_dev *dev)
1026 ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
1027
1028 if (mtk_wed_device_active(&dev->mt76.mmio.wed_ext)) {
1029- mt76_wr(dev, MT_INT1_MASK_CSR,
1030- dev->mt76.mmio.irqmask|MT_INT_TX_DONE_BAND2);
1031+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
1032+ MT_INT_TRX_DONE_EXT);
1033 mtk_wed_device_start(&dev->mt76.mmio.wed_ext,
1034- dev->mt76.mmio.irqmask |MT_INT_TX_DONE_BAND2);
1035+ MT_INT_TRX_DONE_EXT);
1036 }
1037
1038 dev->recovery.hw_init_done = true;
developer064da3c2023-06-13 15:57:26 +08001039diff --git a/mt7996/mac.c b/mt7996/mac.c
developerc2cfe0f2023-09-22 04:11:09 +08001040index e57bdee21..08a32195b 100644
developer064da3c2023-06-13 15:57:26 +08001041--- a/mt7996/mac.c
1042+++ b/mt7996/mac.c
developerc2cfe0f2023-09-22 04:11:09 +08001043@@ -393,8 +393,37 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
developer064da3c2023-06-13 15:57:26 +08001044 return 0;
1045 }
1046
1047+static void
1048+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
1049+ struct mt7996_sta *msta, struct sk_buff *skb,
1050+ u32 info)
1051+{
1052+ struct ieee80211_vif *vif;
1053+ struct wireless_dev *wdev;
1054+
1055+ if (!msta || !msta->vif)
1056+ return;
1057+
1058+ if (!mt76_queue_is_wed_rx(q))
1059+ return;
1060+
1061+ if (!(info & MT_DMA_INFO_PPE_VLD))
1062+ return;
1063+
1064+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
1065+ drv_priv);
1066+ wdev = ieee80211_vif_to_wdev(vif);
1067+ skb->dev = wdev->netdev;
1068+
1069+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
1070+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
1071+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
1072+}
1073+
1074+
1075 static int
1076-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
1077+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
1078+ struct sk_buff *skb, u32 *info)
1079 {
1080 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1081 struct mt76_phy *mphy = &dev->mt76.phy;
developerc2cfe0f2023-09-22 04:11:09 +08001082@@ -419,7 +448,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
developer064da3c2023-06-13 15:57:26 +08001083 u16 seq_ctrl = 0;
1084 __le16 fc = 0;
1085 int idx;
1086+ u8 hw_aggr = false;
1087+ struct mt7996_sta *msta = NULL;
1088
1089+ hw_aggr = status->aggr;
1090 memset(status, 0, sizeof(*status));
1091
1092 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
developerc2cfe0f2023-09-22 04:11:09 +08001093@@ -446,8 +478,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
developer064da3c2023-06-13 15:57:26 +08001094 status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
1095
1096 if (status->wcid) {
1097- struct mt7996_sta *msta;
1098-
1099 msta = container_of(status->wcid, struct mt7996_sta, wcid);
developerc2cfe0f2023-09-22 04:11:09 +08001100 spin_lock_bh(&dev->mt76.sta_poll_lock);
1101 if (list_empty(&msta->wcid.poll_list))
1102@@ -656,13 +686,15 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
developer064da3c2023-06-13 15:57:26 +08001103 #endif
1104 } else {
1105 status->flag |= RX_FLAG_8023;
1106+ mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
1107+ *info);
1108 }
1109
developerc2cfe0f2023-09-22 04:11:09 +08001110 if (rxv && mode >= MT_PHY_TYPE_HE_SU && mode < MT_PHY_TYPE_EHT_SU &&
1111 !(status->flag & RX_FLAG_8023))
1112 mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
developer064da3c2023-06-13 15:57:26 +08001113
1114- if (!status->wcid || !ieee80211_is_data_qos(fc))
1115+ if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
1116 return 0;
1117
1118 status->aggr = unicast &&
developerc2cfe0f2023-09-22 04:11:09 +08001119@@ -1406,7 +1438,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
developer064da3c2023-06-13 15:57:26 +08001120 dev_kfree_skb(skb);
1121 break;
1122 case PKT_TYPE_NORMAL:
1123- if (!mt7996_mac_fill_rx(dev, skb)) {
1124+ if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
1125 mt76_rx(&dev->mt76, q, skb);
1126 return;
1127 }
1128diff --git a/mt7996/mcu.c b/mt7996/mcu.c
developerc2cfe0f2023-09-22 04:11:09 +08001129index 5f18de031..2fc22d576 100644
developer064da3c2023-06-13 15:57:26 +08001130--- a/mt7996/mcu.c
1131+++ b/mt7996/mcu.c
developerc2cfe0f2023-09-22 04:11:09 +08001132@@ -1063,7 +1063,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
developer064da3c2023-06-13 15:57:26 +08001133 static int
1134 mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
1135 struct ieee80211_ampdu_params *params,
1136- bool enable, bool tx)
1137+ bool enable, bool tx, bool rro_enable)
1138 {
1139 struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
1140 struct sta_rec_ba_uni *ba;
developerc2cfe0f2023-09-22 04:11:09 +08001141@@ -1084,6 +1084,8 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
developer064da3c2023-06-13 15:57:26 +08001142 ba->ba_en = enable << params->tid;
1143 ba->amsdu = params->amsdu;
1144 ba->tid = params->tid;
1145+ if (rro_enable && !tx && enable)
1146+ ba->ba_rdd_rro = true;
1147
1148 return mt76_mcu_skb_send_msg(dev, skb,
1149 MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
developerc2cfe0f2023-09-22 04:11:09 +08001150@@ -1101,7 +1103,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
developer064da3c2023-06-13 15:57:26 +08001151 msta->wcid.amsdu = false;
1152
1153 return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
1154- enable, true);
1155+ enable, true, dev->rro_support);
1156 }
1157
1158 int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
developerc2cfe0f2023-09-22 04:11:09 +08001159@@ -1112,7 +1114,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
developer064da3c2023-06-13 15:57:26 +08001160 struct mt7996_vif *mvif = msta->vif;
1161
1162 return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
1163- enable, false);
1164+ enable, false, dev->rro_support);
1165 }
1166
1167 static void
1168diff --git a/mt7996/mmio.c b/mt7996/mmio.c
developerc2cfe0f2023-09-22 04:11:09 +08001169index ad2482ef2..1805d892f 100644
developer064da3c2023-06-13 15:57:26 +08001170--- a/mt7996/mmio.c
1171+++ b/mt7996/mmio.c
developerc2cfe0f2023-09-22 04:11:09 +08001172@@ -336,7 +336,8 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
1173
1174 dev->rro_support = true;
1175
1176- hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
1177+ if (dev->hif2)
1178+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
1179
1180 if (hif2)
1181 wed = &dev->mt76.mmio.wed_ext;
1182@@ -369,9 +370,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
developer064da3c2023-06-13 15:57:26 +08001183 wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
1184 }
1185
1186+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
developerc2cfe0f2023-09-22 04:11:09 +08001187+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base + hif1_ofs +
1188+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND2) +
1189+ MT7996_RXQ_BAND2 * MT_RING_SIZE;
developer064da3c2023-06-13 15:57:26 +08001190+
1191 wed->wlan.chip_id = 0x7991;
1192 wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
1193 } else {
1194+ wed->wlan.hwrro = dev->rro_support; /* default on */
1195 wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
1196 wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
1197 wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
developerc2cfe0f2023-09-22 04:11:09 +08001198@@ -383,13 +390,33 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
1199 MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
1200 MT7996_RXQ_BAND0 * MT_RING_SIZE;
developer064da3c2023-06-13 15:57:26 +08001201
1202+ wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
1203+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
1204+ MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
1205+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
1206+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
1207+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
1208+ wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
1209+ MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
1210+ MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
1211+
1212 wed->wlan.rx_nbuf = 65536;
1213 wed->wlan.rx_npkt = 24576;
1214+ if (dev->hif2)
1215+ wed->wlan.rx_npkt += 8192;
1216+
1217 wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
1218
1219 wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
1220 wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
1221
1222+ wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
1223+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
1224+
1225+ wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
1226+ wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
1227+ wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1;
1228+
1229 wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
1230 wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
1231 if (dev->rro_support) {
developerc2cfe0f2023-09-22 04:11:09 +08001232@@ -401,6 +428,8 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
developer064da3c2023-06-13 15:57:26 +08001233 wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
1234 MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
1235 }
1236+
1237+ dev->mt76.rx_token_size += wed->wlan.rx_npkt;
1238 }
1239
developerc2cfe0f2023-09-22 04:11:09 +08001240 wed->wlan.nbuf = MT7996_TOKEN_SIZE;
1241@@ -417,8 +446,6 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
developer064da3c2023-06-13 15:57:26 +08001242 wed->wlan.release_rx_buf = mt7996_mmio_wed_release_rx_buf;
1243 wed->wlan.update_wo_rx_stats = NULL;
1244
1245- dev->mt76.rx_token_size += wed->wlan.rx_npkt;
1246-
1247 if (mtk_wed_device_attach(wed))
1248 return 0;
1249
developerc2cfe0f2023-09-22 04:11:09 +08001250@@ -530,12 +557,15 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
1251 dev->mt76.mmio.irqmask);
1252 if (intr1 & MT_INT_RX_TXFREE_EXT)
1253 napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
1254+
1255+ if (intr1 & MT_INT_RX_DONE_BAND2_EXT)
1256+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND2]);
1257 }
1258
1259 if (mtk_wed_device_active(wed)) {
1260 mtk_wed_device_irq_set_mask(wed, 0);
1261 intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
1262- intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
1263+ intr |= (intr1 & ~MT_INT_TRX_DONE_EXT);
1264 } else {
1265 mt76_wr(dev, MT_INT_MASK_CSR, 0);
1266 if (dev->hif2)
1267@@ -581,10 +611,9 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
developer064da3c2023-06-13 15:57:26 +08001268 irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
1269 {
1270 struct mt7996_dev *dev = dev_instance;
1271- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
1272
1273- if (mtk_wed_device_active(wed))
1274- mtk_wed_device_irq_set_mask(wed, 0);
1275+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
1276+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0);
1277 else
1278 mt76_wr(dev, MT_INT_MASK_CSR, 0);
1279
developerc2cfe0f2023-09-22 04:11:09 +08001280@@ -616,6 +645,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
developer064da3c2023-06-13 15:57:26 +08001281 SURVEY_INFO_TIME_RX |
1282 SURVEY_INFO_TIME_BSS_RX,
1283 .token_size = MT7996_TOKEN_SIZE,
1284+ .rx_token_size = MT7996_RX_TOKEN_SIZE,
1285 .tx_prepare_skb = mt7996_tx_prepare_skb,
1286 .tx_complete_skb = mt76_connac_tx_complete_skb,
1287 .rx_skb = mt7996_queue_rx_skb,
1288diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
developerc2cfe0f2023-09-22 04:11:09 +08001289index d09358305..cf2a66df2 100644
developer064da3c2023-06-13 15:57:26 +08001290--- a/mt7996/mt7996.h
1291+++ b/mt7996/mt7996.h
developerc2cfe0f2023-09-22 04:11:09 +08001292@@ -40,6 +40,7 @@
developer064da3c2023-06-13 15:57:26 +08001293 #define MT7996_EEPROM_SIZE 7680
1294 #define MT7996_EEPROM_BLOCK_SIZE 16
1295 #define MT7996_TOKEN_SIZE 16384
1296+#define MT7996_RX_TOKEN_SIZE 16384
developerc2cfe0f2023-09-22 04:11:09 +08001297 #define MT7996_SW_TOKEN_SIZE 1024
developer064da3c2023-06-13 15:57:26 +08001298
1299 #define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
developerc2cfe0f2023-09-22 04:11:09 +08001300@@ -65,6 +66,24 @@
developer064da3c2023-06-13 15:57:26 +08001301 #define MT7996_SKU_RATE_NUM 417
1302 #define MT7996_SKU_PATH_NUM 494
1303
1304+#define MT7996_RRO_MSDU_PG_HASH_SIZE 127
1305+#define MT7996_RRO_SESSION_MAX 1024
1306+#define MT7996_RRO_WIN_SIZE_MAX 1024
1307+#define MT7996_RRO_ADDR_ELEM_CR_CNT 128
1308+#define MT7996_RRO_BA_BITMAP_CR_CNT 2
1309+#define MT7996_RRO_SESSION_PER_CR (MT7996_RRO_SESSION_MAX / \
1310+ MT7996_RRO_ADDR_ELEM_CR_CNT)
1311+#define MT7996_BA_BITMAP_SZ_PER_SESSION 128
1312+#define MT7996_BA_BITMAP_SZ_PER_CR ((MT7996_RRO_SESSION_MAX * \
1313+ MT7996_BA_BITMAP_SZ_PER_SESSION) / \
1314+ MT7996_RRO_BA_BITMAP_CR_CNT)
1315+#define MT7996_SKB_TRUESIZE(x) ((x) + \
1316+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
1317+#define MT7996_RX_BUF_SIZE MT7996_SKB_TRUESIZE(1800)
1318+#define MT7996_RX_MSDU_PAGE_SIZE MT7996_SKB_TRUESIZE(128)
1319+
1320+#define MT7996_WED_RX_TOKEN_SIZE 32768
1321+
1322 struct mt7996_vif;
1323 struct mt7996_sta;
1324 struct mt7996_dfs_pulse;
developerc2cfe0f2023-09-22 04:11:09 +08001325@@ -109,6 +128,16 @@ enum mt7996_rxq_id {
developer064da3c2023-06-13 15:57:26 +08001326 MT7996_RXQ_BAND0 = 4,
1327 MT7996_RXQ_BAND1 = 4,/* unused */
1328 MT7996_RXQ_BAND2 = 5,
1329+ MT7996_RXQ_RRO_BAND0 = 8,
1330+ MT7996_RXQ_RRO_BAND1 = 8,/* unused */
1331+ MT7996_RXQ_RRO_BAND2 = 6,
1332+ MT7996_RXQ_MSDU_PG_BAND0 = 10,
1333+ MT7996_RXQ_MSDU_PG_BAND1 = 11,
1334+ MT7996_RXQ_MSDU_PG_BAND2 = 12,
1335+ MT7996_RXQ_TXFREE0 = 9,
1336+ MT7996_RXQ_TXFREE1 = 9,
1337+ MT7996_RXQ_TXFREE2 = 7,
1338+ MT7996_RXQ_RRO_IND = 0,
1339 };
1340
1341 struct mt7996_twt_flow {
developerc2cfe0f2023-09-22 04:11:09 +08001342@@ -216,6 +245,31 @@ struct mt7996_air_monitor_ctrl {
developer064da3c2023-06-13 15:57:26 +08001343 };
1344 #endif
1345
1346+struct mt7996_rro_addr {
1347+ u32 head_pkt_l;
1348+ u32 head_pkt_h : 4;
1349+ u32 seg_cnt : 11;
1350+ u32 out_of_range: 1;
1351+ u32 rsv : 8;
1352+ u32 signature : 8;
1353+};
1354+
1355+struct mt7996_rro_cfg {
1356+ u32 ind_signature;
1357+ void *ba_bitmap_cache_va[MT7996_RRO_BA_BITMAP_CR_CNT];
1358+ void *addr_elem_alloc_va[MT7996_RRO_ADDR_ELEM_CR_CNT];
1359+ void *particular_session_va;
1360+ u32 particular_se_id;
1361+ dma_addr_t ba_bitmap_cache_pa[MT7996_RRO_BA_BITMAP_CR_CNT];
1362+ dma_addr_t addr_elem_alloc_pa[MT7996_RRO_ADDR_ELEM_CR_CNT];
1363+ dma_addr_t particular_session_pa;
1364+ u16 win_sz;
1365+
1366+ spinlock_t lock;
1367+ struct list_head pg_addr_cache;
1368+ struct list_head pg_hash_head[MT7996_RRO_MSDU_PG_HASH_SIZE];
1369+};
1370+
1371 struct mt7996_phy {
1372 struct mt76_phy *mt76;
1373 struct mt7996_dev *dev;
developerc2cfe0f2023-09-22 04:11:09 +08001374@@ -338,6 +392,9 @@ struct mt7996_dev {
developer064da3c2023-06-13 15:57:26 +08001375 bool flash_mode:1;
1376 bool has_eht:1;
1377
1378+ bool rro_support:1;
1379+ struct mt7996_rro_cfg rro;
1380+
1381 bool testmode_enable;
1382 bool bin_file_mode;
1383 u8 eeprom_mode;
developerc2cfe0f2023-09-22 04:11:09 +08001384@@ -662,6 +719,7 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
developer064da3c2023-06-13 15:57:26 +08001385 struct ieee80211_sta *sta,
1386 struct mt76_tx_info *tx_info);
1387 void mt7996_tx_token_put(struct mt7996_dev *dev);
1388+int mt7996_dma_rro_init(struct mt7996_dev *dev);
1389 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1390 struct sk_buff *skb, u32 *info);
1391 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
developerc2cfe0f2023-09-22 04:11:09 +08001392diff --git a/mt7996/pci.c b/mt7996/pci.c
1393index 085408571..9a134fcab 100644
1394--- a/mt7996/pci.c
1395+++ b/mt7996/pci.c
1396@@ -124,6 +124,8 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
1397 mdev = &dev->mt76;
1398 mt7996_wfsys_reset(dev);
1399 hif2 = mt7996_pci_init_hif2(pdev);
1400+ if (hif2)
1401+ dev->hif2 = hif2;
1402
1403 ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
1404 if (ret < 0)
1405@@ -148,7 +150,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
1406
1407 if (hif2) {
1408 hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
1409- dev->hif2 = hif2;
1410
1411 ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &irq);
1412 if (ret < 0)
developer064da3c2023-06-13 15:57:26 +08001413diff --git a/mt7996/regs.h b/mt7996/regs.h
developerc2cfe0f2023-09-22 04:11:09 +08001414index ca7c2a811..c34357c3e 100644
developer064da3c2023-06-13 15:57:26 +08001415--- a/mt7996/regs.h
1416+++ b/mt7996/regs.h
1417@@ -39,6 +39,40 @@ enum base_rev {
1418
1419 #define __BASE(_id, _band) (dev->reg.base[(_id)].band_base[(_band)])
1420
1421+
1422+/* RRO TOP */
1423+#define MT_RRO_TOP_BASE 0xA000
1424+#define MT_RRO_TOP(ofs) (MT_RRO_TOP_BASE + (ofs))
1425+
1426+#define MT_RRO_BA_BITMAP_BASE0 MT_RRO_TOP(0x8)
1427+#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
1428+#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
1429+#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
1430+#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
1431+#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
1432+
1433+#define MT_RRO_IND_CMD_SIGNATURE_BASE0 MT_RRO_TOP(0x38)
1434+#define MT_RRO_IND_CMD_SIGNATURE_BASE1 MT_RRO_TOP(0x3C)
1435+#define MT_RRO_IND_CMD_0_CTRL0 MT_RRO_TOP(0x40)
1436+#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN BIT(31)
1437+
1438+#define MT_RRO_PARTICULAR_CFG0 MT_RRO_TOP(0x5C)
1439+#define MT_RRO_PARTICULAR_CFG1 MT_RRO_TOP(0x60)
1440+#define MT_RRO_PARTICULAR_CONFG_EN BIT(31)
1441+#define MT_RRO_PARTICULAR_SID GENMASK(30, 16)
1442+
1443+#define MT_RRO_BA_BITMAP_BASE_EXT0 MT_RRO_TOP(0x70)
1444+#define MT_RRO_BA_BITMAP_BASE_EXT1 MT_RRO_TOP(0x74)
1445+#define MT_RRO_HOST_INT_ENA MT_RRO_TOP(0x204)
1446+#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA BIT(0)
1447+
1448+#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
1449+
1450+#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
1451+#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
1452+#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
1453+
1454+
1455 #define MT_MCU_INT_EVENT 0x2108
1456 #define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
1457 #define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
developerc2cfe0f2023-09-22 04:11:09 +08001458@@ -407,6 +441,7 @@ enum base_rev {
developer064da3c2023-06-13 15:57:26 +08001459 #define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
1460 #define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
1461 #define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
1462+#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
1463
1464 #define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
1465 MT_MCUQ_ID(q) * 0x4)
developerc2cfe0f2023-09-22 04:11:09 +08001466@@ -432,8 +467,19 @@ enum base_rev {
1467 #define MT_INT_RX_TXFREE_MAIN BIT(17)
1468 #define MT_INT_RX_TXFREE_TRI BIT(15)
developer064da3c2023-06-13 15:57:26 +08001469 #define MT_INT_MCU_CMD BIT(29)
developerc2cfe0f2023-09-22 04:11:09 +08001470+
1471+#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
developer064da3c2023-06-13 15:57:26 +08001472 #define MT_INT_RX_TXFREE_EXT BIT(26)
1473
1474+#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
1475+#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
1476+#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
1477+#define MT_INT_RX_DONE_RRO_IND BIT(11)
1478+#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
1479+#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
1480+#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
1481+
1482+
1483 #define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
1484 #define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
1485
developerc2cfe0f2023-09-22 04:11:09 +08001486@@ -441,20 +487,31 @@ enum base_rev {
developer064da3c2023-06-13 15:57:26 +08001487 MT_INT_RX(MT_RXQ_MCU_WA))
1488
1489 #define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
1490- MT_INT_RX(MT_RXQ_MAIN_WA))
1491+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
1492+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
1493
1494 #define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
1495 MT_INT_RX(MT_RXQ_BAND1_WA) | \
1496- MT_INT_RX(MT_RXQ_MAIN_WA))
1497+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
1498+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
1499
1500 #define MT_INT_BAND2_RX_DONE (MT_INT_RX(MT_RXQ_BAND2) | \
1501 MT_INT_RX(MT_RXQ_BAND2_WA) | \
1502- MT_INT_RX(MT_RXQ_MAIN_WA))
1503+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
1504+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
1505+
1506+#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
1507+ MT_INT_RX(MT_RXQ_RRO_BAND1) | \
1508+ MT_INT_RX(MT_RXQ_RRO_BAND2) | \
1509+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
1510+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
1511+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
1512
1513 #define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
1514 MT_INT_BAND0_RX_DONE | \
1515 MT_INT_BAND1_RX_DONE | \
1516- MT_INT_BAND2_RX_DONE)
1517+ MT_INT_BAND2_RX_DONE | \
1518+ MT_INT_RRO_RX_DONE)
1519
1520 #define MT_INT_TX_DONE_FWDL BIT(26)
1521 #define MT_INT_TX_DONE_MCU_WM BIT(27)
developerc2cfe0f2023-09-22 04:11:09 +08001522@@ -463,6 +520,10 @@ enum base_rev {
1523 #define MT_INT_TX_DONE_BAND1 BIT(31)
1524 #define MT_INT_TX_DONE_BAND2 BIT(15)
1525
1526+#define MT_INT_TRX_DONE_EXT (MT_INT_TX_DONE_BAND2 | \
1527+ MT_INT_RX_DONE_BAND2_EXT | \
1528+ MT_INT_RX_TXFREE_EXT)
1529+
1530 #define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
1531 MT_INT_TX_MCU(MT_MCUQ_WM) | \
1532 MT_INT_TX_MCU(MT_MCUQ_FWDL))
-- 
2.39.2
