From c43d67fb45091ce36aae1f3096a5b8c6cdd46891 Mon Sep 17 00:00:00 2001
From: Rex Lu <rex.lu@mediatek.com>
Date: Tue, 6 Aug 2024 10:06:10 +0800
Subject: [PATCH 182/193] mtk: mt76: mt7996: separate hwrro from wed

1. Separate hwrro from WED.
2. Support running HW RRO 3.0 on mt7996/mt7992 without WED.

Signed-off-by: Rex Lu <rex.lu@mediatek.com>
---
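The standalone (non-WED) RRO path below replaces the usual DMA-done handshake with a small "magic counter": buffers the host queues onto the RRO rings are stamped with the ring's current counter value (MT_DMA_MAGIC_MASK in desc->info), and in the other direction the host only consumes an indication entry once the counter recorded in it matches the value the ring expects, advancing that expected value on every wrap. A minimal, self-contained sketch of the consumer-side check, with simplified types (names only loosely mirror dma.c/dma.h; the real indication ring wraps its counter modulo MT_DMA_WED_IND_CMD_CNT):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE	8	/* e.g. MT_DMA_WED_IND_CMD_CNT entries */
#define MAGIC_CNT	16	/* MT_DMA_MAGIC_CNT: 4-bit counter space */

struct ind_cmd {
	uint8_t magic_cnt;	/* counter written by the producer per entry */
	/* ... payload, see struct mt76_wed_rro_ind ... */
};

struct ind_ring {
	struct ind_cmd cmd[RING_SIZE];
	int tail;		/* next entry the consumer will look at */
	uint8_t magic_cnt;	/* counter value the consumer expects */
};

/*
 * An entry is valid only when its magic counter matches the ring's expected
 * value; after the last slot of a lap is consumed the expected value moves
 * on, so stale entries left over from the previous lap are ignored.
 */
static bool ind_ring_dequeue(struct ind_ring *q, struct ind_cmd *out)
{
	struct ind_cmd *cmd = &q->cmd[q->tail];

	if (cmd->magic_cnt != q->magic_cnt)
		return false;	/* producer has not filled this lap yet */

	if (q->tail == RING_SIZE - 1)
		q->magic_cnt = (q->magic_cnt + 1) % MAGIC_CNT;

	*out = *cmd;
	q->tail = (q->tail + 1) % RING_SIZE;
	return true;
}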
 dma.c           |  78 ++++++++--
 dma.h           |   6 +-
 mac80211.c      |   5 +
 mt76.h          |  19 ++-
 mt7996/dma.c    |  77 +++++++---
 mt7996/init.c   |  60 ++++----
 mt7996/mac.c    | 388 +++++++++++++++++++++++++++++++++++++++++++++++-
 mt7996/mmio.c   |   2 +
 mt7996/mt7996.h |  53 +++++++
 mt7996/pci.c    |   4 +
 mt7996/regs.h   |   1 +
 11 files changed, 617 insertions(+), 76 deletions(-)

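Without WED the host also owns the MSDU page buffers that the hardware chains received MSDUs into: mt7996_rro_fill_msdu_page() (added in mt7996/mac.c below) records every page handed to an MSDU_PG ring in a hash table keyed by its DMA address, and mt7996_rro_rx_process() later looks the page back up from the address carried in the indication command. A minimal sketch of that hash, mirroring mt7996_rro_msdu_pg_hash() from the diff (standalone C, illustrative only):

#include <stdint.h>

#define PG_HASH_SIZE	127	/* MT7996_RRO_MSDU_PG_HASH_SIZE buckets */

/* Fold every byte of the DMA address, mixed with an increasing offset,
 * into one of PG_HASH_SIZE buckets (each bucket is a list_head in
 * dev->wed_rro.pg_hash_head[] in the driver).
 */
static uint32_t msdu_pg_hash(uint64_t pa)
{
	uint32_t sum = 0;
	uint16_t i = 0;

	while (pa) {
		sum += (uint32_t)(((pa & 0xff) + i) % PG_HASH_SIZE);
		pa >>= 8;
		i += 13;
	}

	return sum % PG_HASH_SIZE;
}

The buckets are initialised in mt7996_rro_hw_init() and drained by mt7996_rro_msdu_pg_free() on reset and unregister.
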
diff --git a/dma.c b/dma.c
index 81e7619..7598823 100644
--- a/dma.c
+++ b/dma.c
28@@ -231,7 +231,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
29 struct mt76_queue_entry *entry = &q->entry[q->head];
30 struct mt76_desc *desc;
31 int idx = q->head;
32- u32 buf1 = 0, ctrl;
33+ u32 buf1 = 0, ctrl, info = 0;
34 int rx_token;
35
36 if (mt76_queue_is_wed_rro_ind(q)) {
37@@ -248,7 +248,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
38 buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
39 #endif
40
41- if (mt76_queue_is_wed_rx(q)) {
42+ if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
43 if (!rxwi) {
44 rxwi = mt76_get_rxwi(dev);
45 if (!rxwi) {
46@@ -266,12 +266,24 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
47
48 buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
49 ctrl |= MT_DMA_CTL_TO_HOST;
50+ rxwi->qid = q - dev->q_rx;
51+ }
52+
53+ if (mt76_queue_is_wed_rro_msdu_pg(q)) {
54+ if (dev->drv->rx_rro_fill_msdu_pg(dev, q, buf->addr, data))
55+ return -ENOMEM;
56+ }
57+
58+ if (q->flags & MT_QFLAG_WED_RRO_EN) {
59+ info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
60+ if ((q->head + 1) == q->ndesc)
61+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
62 }
63
64 WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
65 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
66 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
67- WRITE_ONCE(desc->info, 0);
68+ WRITE_ONCE(desc->info, cpu_to_le32(info));
69
70 done:
71 entry->dma_addr[0] = buf->addr;
72@@ -433,7 +445,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
73 void *buf = e->buf;
74 int reason;
75
76- if (mt76_queue_is_wed_rro_ind(q))
77+ if (mt76_queue_is_wed_rro(q))
78 goto done;
79
80 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
81@@ -558,15 +570,28 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
82
83 if (mt76_queue_is_wed_rro_data(q) ||
84 mt76_queue_is_wed_rro_msdu_pg(q))
85- return NULL;
86+ goto done;
87+
88+ if (mt76_queue_is_wed_rro_ind(q)) {
89+ struct mt76_wed_rro_ind *cmd;
90
91- if (!mt76_queue_is_wed_rro_ind(q)) {
92+ if (flush)
93+ goto done;
94+
95+ cmd = q->entry[idx].buf;
96+ if (cmd->magic_cnt != q->magic_cnt)
97+ return NULL;
98+
99+ if (q->tail == q->ndesc - 1)
100+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
101+ } else {
102 if (flush)
103 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
104 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
105 return NULL;
106 }
107
108+done:
109 q->tail = (q->tail + 1) % q->ndesc;
110 q->queued--;
111
112@@ -750,8 +775,8 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
113 break;
114 }
115
116- qbuf.addr = addr + offset;
117 done:
118+ qbuf.addr = addr + offset;
119 qbuf.len = len - offset;
120 qbuf.skip_unmap = false;
121 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
122@@ -856,7 +881,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
123
124 spin_unlock_bh(&q->lock);
125
126- if (mt76_queue_is_wed_rx(q))
127+ if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro(q))
128 return;
129
130 if (!q->rx_page.va)
131@@ -934,8 +959,9 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
132 bool allow_direct = !mt76_queue_is_wed_rx(q);
133 bool more;
134
135- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
136- mt76_queue_is_wed_tx_free(q)) {
137+ if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
138+ (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
139+ mt76_queue_is_wed_tx_free(q))) {
140 dma_idx = Q_READ(q, dma_idx);
141 check_ddone = true;
142 }
143@@ -957,6 +983,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
144 if (!data)
145 break;
146
147+ if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
148+ dev->drv->rx_rro_ind_process(dev, data);
149+
150+ if (mt76_queue_is_wed_rro(q)) {
151+ done++;
152+ continue;
153+ }
154+
155 if (drop || (len == 0))
156 goto free_frag;
157
158@@ -1037,11 +1071,18 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
159 EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
160
161 static int
162-mt76_dma_init(struct mt76_dev *dev,
163+__mt76_dma_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
164 int (*poll)(struct napi_struct *napi, int budget))
165 {
166 int i;
167
168+ if (qid < __MT_RXQ_MAX && dev->q_rx[qid].ndesc) {
169+ netif_napi_add(&dev->napi_dev, &dev->napi[qid], poll);
170+ mt76_dma_rx_fill(dev, &dev->q_rx[qid], false);
171+ napi_enable(&dev->napi[qid]);
172+ return 0;
173+ }
174+
175 init_dummy_netdev(&dev->napi_dev);
176 init_dummy_netdev(&dev->tx_napi_dev);
177 snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
178@@ -1063,6 +1104,20 @@ mt76_dma_init(struct mt76_dev *dev,
179 return 0;
180 }
181
182+static int
183+mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
184+ int (*poll)(struct napi_struct *napi, int budget))
185+{
186+ return __mt76_dma_init(dev, qid, poll);
187+}
188+
189+static int
190+mt76_dma_init(struct mt76_dev *dev,
191+ int (*poll)(struct napi_struct *napi, int budget))
192+{
193+ return __mt76_dma_init(dev, __MT_RXQ_MAX, poll);
194+}
195+
196 static const struct mt76_queue_ops mt76_dma_ops = {
197 .init = mt76_dma_init,
198 .alloc = mt76_dma_alloc_queue,
199@@ -1070,6 +1125,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
200 .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
201 .tx_queue_skb = mt76_dma_tx_queue_skb,
202 .tx_cleanup = mt76_dma_tx_cleanup,
203+ .rx_init = mt76_dma_rx_queue_init,
204 .rx_cleanup = mt76_dma_rx_cleanup,
205 .rx_reset = mt76_dma_rx_reset,
206 .kick = mt76_dma_kick_queue,
diff --git a/dma.h b/dma.h
index 718122d..393be98 100644
--- a/dma.h
+++ b/dma.h
211@@ -31,8 +31,12 @@
212 #define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
213 #define MT_DMA_CTL_VER_MASK BIT(7)
214
215-#define MT_DMA_RRO_EN BIT(13)
216+#define MT_DMA_SDP0 GENMASK(15, 0)
217+#define MT_DMA_TOKEN_ID GENMASK(31, 16)
218+#define MT_DMA_MAGIC_MASK GENMASK(31, 28)
219+#define MT_DMA_RRO_EN BIT(13)
220
221+#define MT_DMA_MAGIC_CNT 16
222 #define MT_DMA_WED_IND_CMD_CNT 8
223 #define MT_DMA_WED_IND_REASON GENMASK(15, 12)
224
diff --git a/mac80211.c b/mac80211.c
index 6190822..d87aba3 100644
--- a/mac80211.c
+++ b/mac80211.c
229@@ -732,6 +732,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
230 struct sk_buff *skb = phy->rx_amsdu[q].head;
231 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
232 struct mt76_dev *dev = phy->dev;
233+ struct mt76_queue *rxq = &dev->q_rx[q];
234
235 phy->rx_amsdu[q].head = NULL;
236 phy->rx_amsdu[q].tail = NULL;
237@@ -763,6 +764,10 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
238 return;
239 }
240 }
241+
242+ if (mt76_queue_is_wed_rro_data(rxq))
243+ q = MT_RXQ_RRO_IND;
244+
245 __skb_queue_tail(&dev->rx_skb[q], skb);
246 }
247
diff --git a/mt76.h b/mt76.h
index 5d7b8c4..50d5277 100644
--- a/mt76.h
+++ b/mt76.h
@@ -277,6 +277,7 @@ struct mt76_queue {

 	u8 buf_offset;
255 u16 flags;
256+ u8 magic_cnt;
257
258 struct mtk_wed_device *wed;
259 u32 wed_regs;
@@ -333,6 +334,9 @@ struct mt76_queue_ops {
 	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
262 bool flush);
263
264+ int (*rx_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
265+ int (*poll)(struct napi_struct *napi, int budget));
266+
267 void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
268
269 void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
@@ -478,6 +482,7 @@ struct mt76_rxwi_cache {
 	dma_addr_t dma_addr;
272
273 void *ptr;
274+ u8 qid;
275 };
276
277 struct mt76_rx_tid {
@@ -591,6 +596,10 @@ struct mt76_driver_ops {
 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
280 struct sk_buff *skb, u32 *info);
281
282+ void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
283+ int (*rx_rro_fill_msdu_pg)(struct mt76_dev *dev, struct mt76_queue *q,
284+ dma_addr_t p, void *data);
285+
286 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
287
288 void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -1321,6 +1330,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
 #define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
291 #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
292 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
293+#define mt76_queue_rx_init(dev, ...) (dev)->mt76.queue_ops->rx_init(&((dev)->mt76), __VA_ARGS__)
294 #define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
295 #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
296 #define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
@@ -1887,13 +1897,8 @@ static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)

 static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
300 {
301- if (!(q->flags & MT_QFLAG_WED))
302- return false;
303-
304- return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
305- mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q) ||
306- mt76_queue_is_wed_rro_msdu_pg(q);
307-
308+ return (q->flags & MT_QFLAG_WED) &&
309+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
310 }
311
312 struct mt76_txwi_cache *
diff --git a/mt7996/dma.c b/mt7996/dma.c
index bbc3814..e4b0af2 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
317@@ -300,7 +300,7 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
318 }
319
320 if (mt7996_band_valid(dev, MT_BAND2))
321- irq_mask |= MT_INT_BAND2_RX_DONE;
322+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
323
324 if (mtk_wed_device_active(wed) && wed_reset) {
325 u32 wed_irq_mask = irq_mask;
326@@ -465,7 +465,6 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
327 mt7996_dma_start(dev, reset, true);
328 }
329
330-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
331 int mt7996_dma_rro_init(struct mt7996_dev *dev)
332 {
333 struct mt76_dev *mdev = &dev->mt76;
334@@ -474,7 +473,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
335
336 /* ind cmd */
337 mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
338- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
339+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
340+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
341+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
342 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
343 MT_RXQ_ID(MT_RXQ_RRO_IND),
344 MT7996_RX_RING_SIZE,
345@@ -485,7 +486,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
346 /* rx msdu page queue for band0 */
347 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
348 MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
349- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
350+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
351+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
352+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
353 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
354 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
355 MT7996_RX_RING_SIZE,
356@@ -498,7 +501,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
357 /* rx msdu page queue for band1 */
358 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
359 MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
360- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
361+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
362+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
363+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
364 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
365 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
366 MT7996_RX_RING_SIZE,
367@@ -512,7 +517,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
368 /* rx msdu page queue for band2 */
369 mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
370 MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
371- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
372+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
373+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
374+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
375 ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
376 MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
377 MT7996_RX_RING_SIZE,
378@@ -522,15 +529,37 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
379 return ret;
380 }
381
382- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
383- MT_INT_TX_DONE_BAND2;
384- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
385- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
386- mt7996_irq_enable(dev, irq_mask);
387+
388+
389+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
390+ irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
391+ MT_INT_TX_DONE_BAND2;
392+
393+ if (mtk_wed_get_rx_capa(&mdev->mmio.wed))
394+ irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
395+
396+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
397+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
398+ mt7996_irq_enable(dev, irq_mask);
399+ } else {
400+ if (is_mt7996(&dev->mt76)) {
401+ mt76_queue_rx_init(dev, MT_RXQ_TXFREE_BAND0, mt76_dma_rx_poll);
402+ mt76_queue_rx_init(dev, MT_RXQ_TXFREE_BAND2, mt76_dma_rx_poll);
403+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1, mt76_dma_rx_poll);
404+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2, mt76_dma_rx_poll);
405+ }
406+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
407+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1, mt76_dma_rx_poll);
408+ if (mt7996_band_valid(dev, MT_BAND2))
409+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2, mt76_dma_rx_poll);
410+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND, mt76_dma_rx_poll);
411+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0, mt76_dma_rx_poll);
412+
413+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
414+ }
415
416 return 0;
417 }
418-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
419
420 int mt7996_dma_init(struct mt7996_dev *dev)
421 {
422@@ -691,12 +720,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
423 return ret;
424 }
425
426- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
427- dev->has_rro) {
428+ if (dev->has_rro) {
429 /* rx rro data queue for band0 */
430 dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
431 MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
432- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
433+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
434+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
435 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
436 MT_RXQ_ID(MT_RXQ_RRO_BAND0),
437 MT7996_RX_RING_SIZE,
438@@ -708,7 +737,8 @@ int mt7996_dma_init(struct mt7996_dev *dev)
439 if (is_mt7992(&dev->mt76)) {
440 dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
441 MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
442- dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
443+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
444+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
445 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
446 MT_RXQ_ID(MT_RXQ_RRO_BAND1),
447 MT7996_RX_RING_SIZE,
448@@ -718,9 +748,10 @@ int mt7996_dma_init(struct mt7996_dev *dev)
449 return ret;
450 } else {
451 /* tx free notify event from WA for band0 */
452- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
453- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
454-
455+ if (mtk_wed_device_active(wed)) {
456+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
457+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
458+ }
459 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
460 MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
461 MT7996_RX_MCU_RING_SIZE,
462@@ -734,7 +765,8 @@ int mt7996_dma_init(struct mt7996_dev *dev)
463 /* rx rro data queue for band2 */
464 dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
465 MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
466- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
467+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
468+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
469 ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
470 MT_RXQ_ID(MT_RXQ_RRO_BAND2),
471 MT7996_RX_RING_SIZE,
472@@ -811,6 +843,11 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
473 dev_info(dev->mt76.dev,"%s L1 SER rx queue clean up done.",
474 wiphy_name(dev->mt76.hw->wiphy));
475
476+ if (dev->has_rro && !mtk_wed_device_active(&dev->mt76.mmio.wed)) {
477+ mt7996_rro_msdu_pg_free(dev);
478+ mt7996_rx_token_put(dev);
479+ }
480+
481 mt76_tx_status_check(&dev->mt76, true);
482
483 if (!force)
diff --git a/mt7996/init.c b/mt7996/init.c
index e9e2ca1..9f80143 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -928,7 +928,6 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)

 void mt7996_rro_hw_init(struct mt7996_dev *dev)
491 {
492-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
493 struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
494 u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
495 int i;
@@ -936,6 +935,10 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
 	if (!dev->has_rro)
498 return;
499
500+ INIT_LIST_HEAD(&dev->wed_rro.pg_addr_cache);
501+ for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
502+ INIT_LIST_HEAD(&dev->wed_rro.pg_hash_head[i]);
503+
504 if (is_mt7992(&dev->mt76)) {
505 /* set emul 3.0 function */
506 mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
@@ -944,9 +947,6 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
 		mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
509 dev->wed_rro.addr_elem[0].phy_addr);
510 } else {
511- INIT_LIST_HEAD(&dev->wed_rro.pg_addr_cache);
512- for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
513- INIT_LIST_HEAD(&dev->wed_rro.pg_hash_head[i]);
514
515 /* TODO: remove line after WM has set */
516 mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
developer1f55fcf2024-10-17 14:52:33 +0800517@@ -969,18 +969,24 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800518 mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
519 MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
520 }
521- wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
522- if (is_mt7996(&dev->mt76))
523- wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
524- else
525- wed->wlan.ind_cmd.particular_sid = 1;
526- wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
527- wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
528- wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
529
530- mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
531- mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
532- MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
533+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
534+ wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
535+ if (is_mt7996(&dev->mt76))
536+ wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
537+ else
538+ wed->wlan.ind_cmd.particular_sid = 1;
539+ wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
540+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
541+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
542+
543+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
544+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
545+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
546+ } else {
547+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0);
548+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, 0);
549+ }
550
551 /* particular session configure */
552 /* use max session idx + 1 as particular session id */
developer1f55fcf2024-10-17 14:52:33 +0800553@@ -1009,12 +1015,10 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800554 mt76_wr(dev, MT_RRO_HOST_INT_ENA,
555 MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
556
557-#endif
558 }
559
560 static int mt7996_wed_rro_init(struct mt7996_dev *dev)
561 {
562-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
563 struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
564 struct mt7996_wed_rro_addr *addr;
565 void *ptr;
developer1f55fcf2024-10-17 14:52:33 +0800566@@ -1023,9 +1027,6 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800567 if (!dev->has_rro)
568 return 0;
569
570- if (!mtk_wed_device_active(wed))
571- return 0;
572-
573 for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
574 ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
575 MT7996_RRO_BA_BITMAP_CR_SIZE,
developer1f55fcf2024-10-17 14:52:33 +0800576@@ -1056,9 +1057,8 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800577 addr->signature = 0xff;
578 addr++;
579 }
580-
581- wed->wlan.ind_cmd.addr_elem_phys[i] =
582- dev->wed_rro.addr_elem[i].phy_addr;
583+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
584+ wed->wlan.ind_cmd.addr_elem_phys[i] = dev->wed_rro.addr_elem[i].phy_addr;
585 }
586
587 for (i = 0; i < MT7996_RRO_MSDU_PG_CR_CNT; i++) {
developer1f55fcf2024-10-17 14:52:33 +0800588@@ -1090,22 +1090,15 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800589 mt7996_rro_hw_init(dev);
590
591 return mt7996_dma_rro_init(dev);
592-#else
593- return 0;
594-#endif
595 }
596
597 static void mt7996_wed_rro_free(struct mt7996_dev *dev)
598 {
599-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
600 int i;
601
602 if (!dev->has_rro)
603 return;
604
605- if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
606- return;
607-
608 for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
609 if (!dev->wed_rro.ba_bitmap[i].ptr)
610 continue;
developer1f55fcf2024-10-17 14:52:33 +0800611@@ -1145,12 +1138,10 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
developer05f3b2b2024-08-19 19:17:34 +0800612 sizeof(struct mt7996_wed_rro_addr),
613 dev->wed_rro.session.ptr,
614 dev->wed_rro.session.phy_addr);
615-#endif
616 }
617
618 static void mt7996_wed_rro_work(struct work_struct *work)
619 {
620-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
621 struct mt7996_dev *dev;
622 LIST_HEAD(list);
623
developer1f55fcf2024-10-17 14:52:33 +0800624@@ -1193,7 +1184,6 @@ reset:
developer05f3b2b2024-08-19 19:17:34 +0800625 out:
626 kfree(e);
627 }
628-#endif
629 }
630
 static int mt7996_variant_type_init(struct mt7996_dev *dev)
@@ -1845,6 +1835,10 @@ void mt7996_unregister_device(struct mt7996_dev *dev)
 	mt7996_mcu_exit(dev);
634 mt7996_tx_token_put(dev);
635 mt7996_dma_cleanup(dev);
636+ if (dev->has_rro && !mtk_wed_device_active(&dev->mt76.mmio.wed)) {
637+ mt7996_rro_msdu_pg_free(dev);
638+ mt7996_rx_token_put(dev);
639+ }
640 tasklet_disable(&dev->mt76.irq_tasklet);
641
642 mt76_free_device(&dev->mt76);
diff --git a/mt7996/mac.c b/mt7996/mac.c
index 58b462e..b0b664e 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -1517,6 +1517,387 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 }
649 }
650
651+static struct mt7996_msdu_pg_addr *
652+mt7996_alloc_pg_addr(struct mt7996_dev *dev)
653+{
654+ struct mt7996_msdu_pg_addr *p;
655+ int size;
656+
657+ size = L1_CACHE_ALIGN(sizeof(*p));
658+ p = kzalloc(size, GFP_ATOMIC);
659+ if (!p)
660+ return NULL;
661+
662+ INIT_LIST_HEAD(&p->list);
663+
664+ return p;
665+}
666+
667+static struct mt7996_msdu_pg_addr *
668+__mt7996_get_pg_addr(struct mt7996_dev *dev)
669+{
670+ struct mt7996_msdu_pg_addr *p = NULL;
671+
672+ spin_lock(&dev->wed_rro.lock);
673+ if (!list_empty(&dev->wed_rro.pg_addr_cache)) {
674+ p = list_first_entry(&dev->wed_rro.pg_addr_cache,
675+ struct mt7996_msdu_pg_addr,
676+ list);
677+ if (p)
678+ list_del(&p->list);
679+ }
680+ spin_unlock(&dev->wed_rro.lock);
681+
682+ return p;
683+}
684+
685+struct mt7996_msdu_pg_addr *
686+mt7996_get_pg_addr(struct mt7996_dev *dev)
687+{
688+ struct mt7996_msdu_pg_addr *p = __mt7996_get_pg_addr(dev);
689+
690+ if (p)
691+ return p;
692+
693+ return mt7996_alloc_pg_addr(dev);
694+}
695+
696+static void
697+mt7996_put_pg_addr(struct mt7996_dev *dev,
698+ struct mt7996_msdu_pg_addr *p)
699+{
700+ if (!p)
701+ return;
702+
703+ if (p->buf) {
704+ skb_free_frag(p->buf);
705+ p->buf = NULL;
706+ }
707+
708+ spin_lock(&dev->wed_rro.lock);
709+ list_add(&p->list, &dev->wed_rro.pg_addr_cache);
710+ spin_unlock(&dev->wed_rro.lock);
711+}
712+
713+static void
714+mt7996_free_pg_addr(struct mt7996_dev *dev)
715+{
716+ struct mt7996_msdu_pg_addr *pg_addr;
717+
718+ local_bh_disable();
719+ while ((pg_addr = __mt7996_get_pg_addr(dev)) != NULL) {
720+ if (pg_addr->buf) {
721+ skb_free_frag(pg_addr->buf);
722+ pg_addr->buf = NULL;
723+ }
724+ kfree(pg_addr);
725+ }
726+ local_bh_enable();
727+}
728+
729+static u32
730+mt7996_rro_msdu_pg_hash(dma_addr_t pa)
731+{
732+ u32 sum = 0;
733+ u16 i = 0;
734+
735+ while (pa != 0) {
736+ sum += (u32) ((pa & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
737+ pa >>= 8;
738+ i += 13;
739+ }
740+
741+ return sum % MT7996_RRO_MSDU_PG_HASH_SIZE;
742+}
743+
744+static struct mt7996_msdu_pg_addr *
745+mt7996_rro_msdu_pg_search(struct mt7996_dev *dev, dma_addr_t pa)
746+{
747+ struct mt7996_msdu_pg_addr *pg_addr, *tmp;
748+ u32 hash_idx = mt7996_rro_msdu_pg_hash(pa);
749+ struct list_head *head;
750+ u8 found = 0;
751+
752+ spin_lock(&dev->wed_rro.lock);
753+ head = &dev->wed_rro.pg_hash_head[hash_idx];
754+ list_for_each_entry_safe(pg_addr, tmp, head, list) {
755+ if (pg_addr->dma_addr == pa) {
756+ list_del(&pg_addr->list);
757+ found = 1;
758+ break;
759+ }
760+ }
761+ spin_unlock(&dev->wed_rro.lock);
762+
763+ return (found == 1) ? pg_addr : NULL;
764+}
765+
766+void mt7996_rro_msdu_pg_free(struct mt7996_dev *dev)
767+{
768+ struct mt7996_msdu_pg_addr *pg_addr, *tmp;
769+ struct list_head *head;
770+ u32 i;
771+
772+ local_bh_disable();
773+ for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++) {
774+ head = &dev->wed_rro.pg_hash_head[i];
775+ list_for_each_entry_safe(pg_addr, tmp, head, list) {
776+ list_del_init(&pg_addr->list);
777+ dma_unmap_single(dev->mt76.dma_dev, pg_addr->dma_addr,
778+ SKB_WITH_OVERHEAD(pg_addr->q->buf_size),
779+ DMA_FROM_DEVICE);
780+ if (pg_addr->buf) {
781+ skb_free_frag(pg_addr->buf);
782+ pg_addr->buf = NULL;
783+ }
784+ kfree(pg_addr);
785+ }
786+ }
787+ local_bh_enable();
788+
789+ mt7996_free_pg_addr(dev);
790+
791+ mt76_for_each_q_rx(&dev->mt76, i) {
792+ struct mt76_queue *q = &dev->mt76.q_rx[i];
793+ struct page *page;
794+
795+ if (mt76_queue_is_wed_rro_msdu_pg(q)) {
796+ if (!q->rx_page.va)
797+ continue;
798+
799+ page = virt_to_page(q->rx_page.va);
800+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
801+ memset(&q->rx_page, 0, sizeof(q->rx_page));
802+ }
803+ }
804+}
805+
806+void mt7996_rx_token_put(struct mt7996_dev *dev)
807+{
808+ struct mt76_queue *q;
809+ struct page *page;
810+ int i;
811+
812+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
813+ struct mt76_rxwi_cache *r;
814+
815+ r = mt76_rx_token_release(&dev->mt76, i);
816+ if (!r || !r->ptr)
817+ continue;
818+
819+ q = &dev->mt76.q_rx[r->qid];
820+ dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
821+ SKB_WITH_OVERHEAD(q->buf_size),
822+ DMA_FROM_DEVICE);
823+ skb_free_frag(r->ptr);
824+ r->dma_addr = 0;
825+ r->ptr = NULL;
826+
827+ mt76_put_rxwi(&dev->mt76, r);
828+ }
829+
830+ mt76_for_each_q_rx(&dev->mt76, i) {
831+ struct mt76_queue *q = &dev->mt76.q_rx[i];
832+
833+ if (mt76_queue_is_wed_rro_data(q)) {
834+ if (!q->rx_page.va)
835+ continue;
836+
837+ page = virt_to_page(q->rx_page.va);
838+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
839+ memset(&q->rx_page, 0, sizeof(q->rx_page));
840+ }
841+ }
842+
843+ mt76_free_pending_rxwi(&dev->mt76);
844+}
845+
846+int mt7996_rro_fill_msdu_page(struct mt76_dev *mdev, struct mt76_queue *q,
847+ dma_addr_t p, void *data)
848+{
849+ struct mt7996_msdu_pg_addr *pg_addr;
850+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
851+ struct mt7996_msdu_pg *pg = data;
852+ u32 hash_idx;
853+
854+ pg->owner = 1;
855+ pg_addr = mt7996_get_pg_addr(dev);
856+ if (!pg_addr)
857+ return -ENOMEM;
858+
859+ pg_addr->buf = data;
860+ pg_addr->dma_addr = p;
861+ pg_addr->q = q;
862+ hash_idx = mt7996_rro_msdu_pg_hash(pg_addr->dma_addr);
863+
864+ spin_lock(&dev->wed_rro.lock);
865+ list_add_tail(&pg_addr->list,
866+ &dev->wed_rro.pg_hash_head[hash_idx]);
867+ spin_unlock(&dev->wed_rro.lock);
868+
869+ return 0;
870+}
871+
872+static struct mt7996_wed_rro_addr *
873+mt7996_rro_get_addr_elem(struct mt7996_dev *dev, u16 seid, u16 sn)
874+{
875+ u32 idx;
876+ void *addr;
877+
878+ if (seid == MT7996_RRO_MAX_SESSION) {
879+ addr = dev->wed_rro.session.ptr;
880+ idx = sn % MT7996_RRO_WINDOW_MAX_LEN;
881+ } else {
+		addr = dev->wed_rro.addr_elem[seid / MT7996_RRO_BA_BITMAP_SESSION_SIZE].ptr;
883+ idx = (seid % MT7996_RRO_BA_BITMAP_SESSION_SIZE) * MT7996_RRO_WINDOW_MAX_LEN
884+ + (sn % MT7996_RRO_WINDOW_MAX_LEN);
885+ }
886+ return addr + idx * sizeof(struct mt7996_wed_rro_addr);
887+}
888+
889+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
890+{
891+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
892+ struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
893+ struct mt76_rxwi_cache *r;
894+ struct mt76_rx_status *status;
895+ struct mt76_queue *q;
896+ struct mt7996_wed_rro_addr *elem;
897+ struct mt7996_msdu_pg_addr *pg_addr = NULL;
898+ struct mt7996_msdu_pg *pg = NULL;
899+ struct mt7996_rro_hif *rxd;
900+ struct sk_buff *skb;
901+ dma_addr_t msdu_pg_pa;
902+ int len, data_len, i, j, sn;
903+ void *buf;
904+ u8 more, qid;
905+ u32 info = 0;
906+
907+ for (i = 0; i < cmd->ind_cnt; i++) {
908+ sn = (cmd->start_sn + i) & GENMASK(11, 0);
909+ elem = mt7996_rro_get_addr_elem(dev, cmd->se_id, sn);
910+ if (elem->signature != (sn / MT7996_RRO_WINDOW_MAX_LEN)) {
911+ elem->signature = 0xff;
912+ goto update_ack_sn;
913+ }
914+
915+ msdu_pg_pa = elem->head_high;
916+ msdu_pg_pa <<= 32;
917+ msdu_pg_pa |= elem->head_low;
918+
919+ for (j = 0; j < elem->count; j++) {
920+ if (pg_addr == NULL) {
921+ pg_addr = mt7996_rro_msdu_pg_search(dev, msdu_pg_pa);
922+
923+ if (pg_addr == NULL) {
924+ dev_info(mdev->dev, "pg_addr(%llx) search fail\n",
925+ msdu_pg_pa);
926+ continue;
927+ }
928+
929+ dma_unmap_single(mdev->dma_dev, pg_addr->dma_addr,
930+ SKB_WITH_OVERHEAD(pg_addr->q->buf_size),
931+ DMA_FROM_DEVICE);
932+
933+ pg = (struct mt7996_msdu_pg *) pg_addr->buf;
934+ }
935+
936+ rxd = &pg->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
937+ more = !rxd->ls;
938+ len = rxd->sdl;
939+
940+ r = mt76_rx_token_release(mdev, rxd->rx_token_id);
941+ if (!r)
942+ goto next_page_chk;
943+
944+ qid = r->qid;
945+ buf = r->ptr;
946+ q = &mdev->q_rx[qid];
947+ dma_unmap_single(mdev->dma_dev, r->dma_addr,
948+ SKB_WITH_OVERHEAD(q->buf_size),
949+ DMA_FROM_DEVICE);
950+ r->dma_addr = 0;
951+ r->ptr = NULL;
952+ mt76_put_rxwi(mdev, r);
953+ if (!buf)
954+ goto next_page_chk;
955+
956+ if (q->rx_head)
957+ data_len = q->buf_size;
958+ else
959+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
960+
961+ if (data_len < len + q->buf_offset) {
962+ dev_kfree_skb(q->rx_head);
963+ skb_free_frag(buf);
964+ q->rx_head = NULL;
965+ goto next_page_chk;
966+ }
967+
968+ if (q->rx_head) {
+				/* TODO: fragment error, skip handling */
970+ //mt76_add_fragment(mdev, q, buf, len, more, info);
971+ skb_free_frag(buf);
972+ if (!more) {
973+ dev_kfree_skb(q->rx_head);
974+ q->rx_head = NULL;
975+ }
976+ goto next_page_chk;
977+ }
978+
979+ if (!more && !mt7996_rx_check(mdev, buf, len))
980+ goto next_page_chk;
981+
982+ skb = build_skb(buf, q->buf_size);
983+ if (!skb)
984+ goto next_page_chk;
985+
986+ skb_reserve(skb, q->buf_offset);
987+ __skb_put(skb, len);
988+
989+ if (cmd->ind_reason == 1 || cmd->ind_reason == 2) {
990+ dev_kfree_skb(skb);
991+ goto next_page_chk;
992+ }
993+
994+ if (more) {
995+ q->rx_head = skb;
996+ goto next_page_chk;
997+ }
998+
999+ status = (struct mt76_rx_status *)skb->cb;
1000+ if (cmd->se_id != MT7996_RRO_MAX_SESSION)
1001+ status->aggr = true;
1002+
1003+ mt7996_queue_rx_skb(mdev, qid, skb, &info);
1004+
1005+next_page_chk:
1006+ if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
1007+ msdu_pg_pa = pg->next_pg_h;
1008+ msdu_pg_pa <<= 32;
1009+ msdu_pg_pa |= pg->next_pg_l;
1010+ mt7996_put_pg_addr(dev, pg_addr);
1011+ pg_addr = NULL;
1012+ }
1013+ }
1014+update_ack_sn:
1015+ if ((i + 1) % 4 == 0)
1016+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
1017+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, cmd->se_id) |
1018+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, sn));
1019+ if (pg_addr) {
1020+ mt7996_put_pg_addr(dev, pg_addr);
1021+ pg_addr = NULL;
1022+ }
1023+ }
1024+
1025+ /* update ack_sn for remaining addr_elem */
1026+ if (i % 4 != 0)
1027+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
1028+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, cmd->se_id) |
1029+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, sn));
1030+}
1031+
1032 void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
1033 {
1034 struct mt7996_dev *dev = phy->dev;
@@ -2052,6 +2433,9 @@ void mt7996_mac_reset_work(struct work_struct *work)
 	dev_info(dev->mt76.dev,"%s L1 SER dma start done.",
1037 wiphy_name(dev->mt76.hw->wiphy));
1038
1039+ if (is_mt7992(&dev->mt76) && dev->has_rro)
1040+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
1041+
1042 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
1043 u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
1044 dev->mt76.mmio.irqmask;
@@ -2061,10 +2445,6 @@ void mt7996_mac_reset_work(struct work_struct *work)

 		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
1048
1049- if (is_mt7992(&dev->mt76) && dev->has_rro)
1050- mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
1051- MT_RRO_3_0_EMU_CONF_EN_MASK);
1052-
1053 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
1054 true);
1055
diff --git a/mt7996/mmio.c b/mt7996/mmio.c
index ee5ee5a..c84c1bb 100644
--- a/mt7996/mmio.c
+++ b/mt7996/mmio.c
1060@@ -654,6 +654,8 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
1061 .rx_skb = mt7996_queue_rx_skb,
1062 .rx_check = mt7996_rx_check,
1063 .rx_poll_complete = mt7996_rx_poll_complete,
1064+ .rx_rro_ind_process = mt7996_rro_rx_process,
1065+ .rx_rro_fill_msdu_pg = mt7996_rro_fill_msdu_page,
1066 .sta_add = mt7996_mac_sta_add,
 	.sta_event = mt7996_mac_sta_event,
 	.sta_remove = mt7996_mac_sta_remove,
diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
index 59f0be0..b115e7d 100644
--- a/mt7996/mt7996.h
+++ b/mt7996/mt7996.h
@@ -104,6 +104,7 @@

 #define MT7996_BUILD_TIME_LEN		24
1076
1077+#define MT7996_MAX_HIF_RXD_IN_PG 5
1078 #define MT7996_RRO_MSDU_PG_HASH_SIZE 127
1079 #define MT7996_RRO_MAX_SESSION 1024
1080 #define MT7996_RRO_WINDOW_MAX_LEN 1024
@@ -533,6 +534,33 @@ int mt7996_mcu_set_muru_qos_cfg(struct mt7996_dev *dev, u16 wlan_idx, u8 dir,
 				u8 scs_id, u8 req_type, u8 *qos_ie, u8 qos_ie_len);
1083 #endif
1084
1085+struct mt7996_rro_hif {
1086+ u32 rx_blk_base_l;
1087+ u32 rx_blk_base_h: 4;
1088+ u32 eth_hdr_ofst : 7;
1089+ u32 rsv : 1;
1090+ u32 ring_no : 2;
1091+ u32 dst_sel : 2;
1092+ u32 sdl :14;
1093+ u32 ls : 1;
1094+ u32 rsv2 : 1;
1095+ u32 pn_31_0;
1096+ u32 pn_47_32 :16;
1097+ u32 cs_status : 4;
1098+ u32 cs_type : 4;
1099+ u32 c : 1;
1100+ u32 f : 1;
1101+ u32 un : 1;
1102+ u32 rsv3 : 1;
1103+ u32 is_fc_data : 1;
1104+ u32 uc : 1;
1105+ u32 mc : 1;
1106+ u32 bc : 1;
1107+ u16 rx_token_id;
1108+ u16 rsv4;
1109+ u32 rsv5;
1110+};
1111+
1112 struct mt7996_rro_ba_session {
1113 u32 ack_sn :12;
1114 u32 win_sz :3;
@@ -548,6 +576,26 @@ struct mt7996_rro_ba_session {
 	u32 last_in_rxtime :12;
1117 };
1118
1119+struct mt7996_rro_ba_session_elem {
1120+ struct list_head poll_list;
1121+ u16 session_id;
1122+};
1123+
1124+struct mt7996_msdu_pg {
1125+ struct mt7996_rro_hif rxd[MT7996_MAX_HIF_RXD_IN_PG];
1126+ u32 next_pg_l;
1127+ u32 next_pg_h : 4;
1128+ u32 rsv :27;
1129+ u32 owner : 1;
1130+};
1131+
1132+struct mt7996_msdu_pg_addr {
1133+ struct list_head list;
1134+ dma_addr_t dma_addr;
1135+ struct mt76_queue *q;
1136+ void *buf;
1137+};
1138+
1139 struct mt7996_chanctx {
1140 struct cfg80211_chan_def chandef;
1141 struct mt7996_phy *phy;
@@ -1261,6 +1309,11 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 void mt7996_tx_token_put(struct mt7996_dev *dev);
1144 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1145 struct sk_buff *skb, u32 *info);
1146+void mt7996_rx_token_put(struct mt7996_dev *dev);
1147+void mt7996_rro_msdu_pg_free(struct mt7996_dev *dev);
1148+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data);
1149+int mt7996_rro_fill_msdu_page(struct mt76_dev *mdev, struct mt76_queue *q,
1150+ dma_addr_t p, void *data);
1151 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
1152 void mt7996_stats_work(struct work_struct *work);
1153 void mt7996_scan_work(struct work_struct *work);
diff --git a/mt7996/pci.c b/mt7996/pci.c
index 382b6a8..a010680 100644
--- a/mt7996/pci.c
+++ b/mt7996/pci.c
1158@@ -13,6 +13,9 @@
1159 static bool hif2_enable = false;
1160 module_param(hif2_enable, bool, 0644);
1161
1162+static bool rro_enable = false;
1163+module_param(rro_enable, bool, 0644);
1164+
1165 static LIST_HEAD(hif_list);
1166 static DEFINE_SPINLOCK(hif_lock);
1167 static u32 hif_idx;
1168@@ -140,6 +143,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
1169 if (IS_ERR(dev))
1170 return PTR_ERR(dev);
1171
1172+ dev->has_rro = rro_enable;
1173 mdev = &dev->mt76;
1174 mt7996_wfsys_reset(dev);
1175 hif2 = mt7996_pci_init_hif2(pdev);
diff --git a/mt7996/regs.h b/mt7996/regs.h
index a0b57e5..e86d5df 100644
--- a/mt7996/regs.h
+++ b/mt7996/regs.h
1180@@ -561,6 +561,7 @@ enum offs_rev {
1181 #define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
1182 MT_INT_RX(MT_RXQ_RRO_BAND1) | \
1183 MT_INT_RX(MT_RXQ_RRO_BAND2) | \
1184+ MT_INT_RX(MT_RXQ_RRO_IND) | \
1185 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
1186 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
1187 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
-- 
2.45.2
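Usage note: standalone HW RRO is opt-in through the new rro_enable module parameter added in mt7996/pci.c, which simply seeds dev->has_rro at probe time. Assuming the usual in-tree module name for this driver (mt7996e), it would be enabled with e.g. "modprobe mt7996e rro_enable=1"; when a WED device is active and RX-capable, the existing WED-driven RRO path is kept.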