[rdkb][common][bsp][Refactor and sync wifi from openwrt]

[Description]
3a2eef0b [MAC80211][Release][Update release note for Filogic 880/860 MLO Beta release]
cfbd2411 [MAC80211][Release][Filogic 880/860 MLO Beta release]
6c180e3f [MAC80211][WiFi7][misc][Add Eagle BE14000 efem default bin]
a55f34db [MAC80211][Release][Prepare for Filogic 880/860 release]
5b45ebca [MAC80211][WiFi7][hostapd][Add puncture bitmap to ucode]
95bbea73 [MAC80211][WiFi6][mt76][Add PID to only report data-frame TX rate]
b15ced26 [MAC80211][WiFi6][hostapd][Fix DFS channel selection issue]
d59133cb [MAC80211][WiFi6][mt76][Fix pse info not correct information]
3921b4b2 [MAC80211][WiFi6][mt76][Fix incomplete QoS-map setting to FW]
4e7690c7 [MAC80211][WiFi6/7][app][Change ATECHANNEL mapping cmd]
eb37af90 [MAC80211][WiFi7][app][Add support for per-packet bw & primary selection]
0ea82adf [MAC80211][WiFi6][core][Fix DFS CAC issue after CSA]
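
Note: the bulk of this sync is the new mt76 patch 0195, which decouples
hardware RX reordering (HW RRO) from WED so mt7996/mt7992 can run
HW RRO 3.0 with the host CPU driving the rings. The host path is
opt-in via the rro_enable module parameter added in mt7996/pci.c;
assuming the usual mt7996e module name, it would be enabled with
something like:

    modprobe mt7996e rro_enable=1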

[Release-log]

Change-Id: I9bec97ec1b2e1c49ed43a812a07a5b21fcbb70a6
diff --git a/recipes-wifi/linux-mt76/files/patches-3.x/0195-mtk-mt76-mt7996-separate-hwrro-from-wed.patch b/recipes-wifi/linux-mt76/files/patches-3.x/0195-mtk-mt76-mt7996-separate-hwrro-from-wed.patch
new file mode 100644
index 0000000..29f0504
--- /dev/null
+++ b/recipes-wifi/linux-mt76/files/patches-3.x/0195-mtk-mt76-mt7996-separate-hwrro-from-wed.patch
@@ -0,0 +1,1190 @@
+From 8b65a89860eb0fcd914382b4297efa262f4c158c Mon Sep 17 00:00:00 2001
+From: Rex Lu <rex.lu@mediatek.com>
+Date: Tue, 6 Aug 2024 10:06:10 +0800
+Subject: [PATCH 195/199] mtk: mt76: mt7996: separate hwrro from wed
+
+1. Separate HW RRO (hardware RX reordering) from WED.
+2. Support mt7996/mt7992 running HW RRO 3.0 without WED.
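+
+With WED inactive, the host drives the RRO rings itself: the data,
+ind-cmd and msdu-page queues keep MT_QFLAG_WED_RRO_EN but get no wed
+pointer, NAPI polls them through the new rx_init queue op, and
+descriptor ownership is tracked with a per-queue magic counter rather
+than the DMA-done bit. A rough sketch of that ownership scheme,
+condensed from the dma.c hunks below (not verbatim):
+
+	/* fill side: stamp the queue's current magic into the
+	 * descriptor, bumping the counter once per ring wrap */
+	info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
+	if (q->head + 1 == q->ndesc)
+		q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
+
+	/* dequeue side (ind cmd ring): an entry is valid only while
+	 * its stored magic matches the queue's expected magic */
+	cmd = q->entry[idx].buf;
+	if (cmd->magic_cnt != q->magic_cnt)
+		return NULL;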
+
+Signed-off-by: Rex Lu <rex.lu@mediatek.com>
+---
+ dma.c           |  78 ++++++++--
+ dma.h           |   6 +-
+ mac80211.c      |   5 +
+ mt76.h          |  19 ++-
+ mt7996/dma.c    |  77 +++++++---
+ mt7996/init.c   |  60 ++++----
+ mt7996/mac.c    | 388 +++++++++++++++++++++++++++++++++++++++++++++++-
+ mt7996/mmio.c   |   2 +
+ mt7996/mt7996.h |  53 +++++++
+ mt7996/pci.c    |   4 +
+ mt7996/regs.h   |   1 +
+ 11 files changed, 617 insertions(+), 76 deletions(-)
+
+diff --git a/dma.c b/dma.c
+index 81e76191..7598823a 100644
+--- a/dma.c
++++ b/dma.c
+@@ -231,7 +231,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 	struct mt76_queue_entry *entry = &q->entry[q->head];
+ 	struct mt76_desc *desc;
+ 	int idx = q->head;
+-	u32 buf1 = 0, ctrl;
++	u32 buf1 = 0, ctrl, info = 0;
+ 	int rx_token;
+ 
+ 	if (mt76_queue_is_wed_rro_ind(q)) {
+@@ -248,7 +248,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
+ #endif
+ 
+-	if (mt76_queue_is_wed_rx(q)) {
++	if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
+ 		if (!rxwi) {
+ 			rxwi = mt76_get_rxwi(dev);
+ 			if (!rxwi) {
+@@ -266,12 +266,24 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+ 		ctrl |= MT_DMA_CTL_TO_HOST;
++		rxwi->qid = q - dev->q_rx;
++	}
++
++	if (mt76_queue_is_wed_rro_msdu_pg(q)) {
++		if (dev->drv->rx_rro_fill_msdu_pg(dev, q, buf->addr, data))
++			return -ENOMEM;
++	}
++
++	if (q->flags & MT_QFLAG_WED_RRO_EN) {
++		info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
++		if ((q->head + 1) == q->ndesc)
++			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
+ 	}
+ 
+ 	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
+ 	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+ 	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+-	WRITE_ONCE(desc->info, 0);
++	WRITE_ONCE(desc->info, cpu_to_le32(info));
+ 
+ done:
+ 	entry->dma_addr[0] = buf->addr;
+@@ -433,7 +445,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ 	void *buf = e->buf;
+ 	int reason;
+ 
+-	if (mt76_queue_is_wed_rro_ind(q))
++	if (mt76_queue_is_wed_rro(q))
+ 		goto done;
+ 
+ 	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
+@@ -558,15 +570,28 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ 
+ 	if (mt76_queue_is_wed_rro_data(q) ||
+ 	    mt76_queue_is_wed_rro_msdu_pg(q))
+-		return NULL;
++		goto done;
++
++	if (mt76_queue_is_wed_rro_ind(q)) {
++		struct mt76_wed_rro_ind *cmd;
+ 
+-	if (!mt76_queue_is_wed_rro_ind(q)) {
++		if (flush)
++			goto done;
++
++		cmd = q->entry[idx].buf;
++		if (cmd->magic_cnt != q->magic_cnt)
++			return NULL;
++
++		if (q->tail == q->ndesc - 1)
++			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
++	} else {
+ 		if (flush)
+ 			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ 		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ 			return NULL;
+ 	}
+ 
++done:
+ 	q->tail = (q->tail + 1) % q->ndesc;
+ 	q->queued--;
+ 
+@@ -750,8 +775,8 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ 			break;
+ 		}
+ 
+-		qbuf.addr = addr + offset;
+ done:
++		qbuf.addr = addr + offset;
+ 		qbuf.len = len - offset;
+ 		qbuf.skip_unmap = false;
+ 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf, NULL) < 0) {
+@@ -856,7 +881,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 	spin_unlock_bh(&q->lock);
+ 
+-	if (mt76_queue_is_wed_rx(q))
++	if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro(q))
+ 		return;
+ 
+ 	if (!q->rx_page.va)
+@@ -934,8 +959,9 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ 	bool allow_direct = !mt76_queue_is_wed_rx(q);
+ 	bool more;
+ 
+-	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+-	    mt76_queue_is_wed_tx_free(q)) {
++	if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
++	    (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
++	    mt76_queue_is_wed_tx_free(q))) {
+ 		dma_idx = Q_READ(q, dma_idx);
+ 		check_ddone = true;
+ 	}
+@@ -957,6 +983,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ 		if (!data)
+ 			break;
+ 
++		if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
++			dev->drv->rx_rro_ind_process(dev, data);
++
++		if (mt76_queue_is_wed_rro(q)) {
++			done++;
++			continue;
++		}
++
+ 		if (drop || (len == 0))
+ 			goto free_frag;
+ 
+@@ -1037,11 +1071,18 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+ EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
+ 
+ static int
+-mt76_dma_init(struct mt76_dev *dev,
++__mt76_dma_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ 	      int (*poll)(struct napi_struct *napi, int budget))
+ {
+ 	int i;
+ 
++	if (qid < __MT_RXQ_MAX && dev->q_rx[qid].ndesc) {
++		netif_napi_add(&dev->napi_dev, &dev->napi[qid], poll);
++		mt76_dma_rx_fill(dev, &dev->q_rx[qid], false);
++		napi_enable(&dev->napi[qid]);
++		return 0;
++	}
++
+ 	init_dummy_netdev(&dev->napi_dev);
+ 	init_dummy_netdev(&dev->tx_napi_dev);
+ 	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
+@@ -1063,6 +1104,20 @@ mt76_dma_init(struct mt76_dev *dev,
+ 	return 0;
+ }
+ 
++static int
++mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
++	      int (*poll)(struct napi_struct *napi, int budget))
++{
++	return __mt76_dma_init(dev, qid, poll);
++}
++
++static int
++mt76_dma_init(struct mt76_dev *dev,
++	      int (*poll)(struct napi_struct *napi, int budget))
++{
++	return __mt76_dma_init(dev, __MT_RXQ_MAX, poll);
++}
++
+ static const struct mt76_queue_ops mt76_dma_ops = {
+ 	.init = mt76_dma_init,
+ 	.alloc = mt76_dma_alloc_queue,
+@@ -1070,6 +1125,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
+ 	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
+ 	.tx_queue_skb = mt76_dma_tx_queue_skb,
+ 	.tx_cleanup = mt76_dma_tx_cleanup,
++	.rx_init = mt76_dma_rx_queue_init,
+ 	.rx_cleanup = mt76_dma_rx_cleanup,
+ 	.rx_reset = mt76_dma_rx_reset,
+ 	.kick = mt76_dma_kick_queue,
+diff --git a/dma.h b/dma.h
+index 718122d5..393be98a 100644
+--- a/dma.h
++++ b/dma.h
+@@ -31,8 +31,12 @@
+ #define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
+ #define MT_DMA_CTL_VER_MASK		BIT(7)
+ 
+-#define MT_DMA_RRO_EN		BIT(13)
++#define MT_DMA_SDP0			GENMASK(15, 0)
++#define MT_DMA_TOKEN_ID			GENMASK(31, 16)
++#define MT_DMA_MAGIC_MASK		GENMASK(31, 28)
++#define MT_DMA_RRO_EN			BIT(13)
+ 
++#define MT_DMA_MAGIC_CNT		16
+ #define MT_DMA_WED_IND_CMD_CNT		8
+ #define MT_DMA_WED_IND_REASON		GENMASK(15, 12)
+ 
+diff --git a/mac80211.c b/mac80211.c
+index d5f842db..d81bf667 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -732,6 +732,7 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
+ 	struct sk_buff *skb = phy->rx_amsdu[q].head;
+ 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ 	struct mt76_dev *dev = phy->dev;
++	struct mt76_queue *rxq = &dev->q_rx[q];
+ 
+ 	phy->rx_amsdu[q].head = NULL;
+ 	phy->rx_amsdu[q].tail = NULL;
+@@ -763,6 +764,10 @@ static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
+ 			return;
+ 		}
+ 	}
++
++	if (mt76_queue_is_wed_rro_data(rxq))
++		q = MT_RXQ_RRO_IND;
++
+ 	__skb_queue_tail(&dev->rx_skb[q], skb);
+ }
+ 
+diff --git a/mt76.h b/mt76.h
+index e5e5529d..58e9b0b7 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -276,6 +276,7 @@ struct mt76_queue {
+ 
+ 	u8 buf_offset;
+ 	u16 flags;
++	u8 magic_cnt;
+ 
+ 	struct mtk_wed_device *wed;
+ 	u32 wed_regs;
+@@ -329,6 +330,9 @@ struct mt76_queue_ops {
+ 	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
+ 			   bool flush);
+ 
++	int (*rx_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
++		       int (*poll)(struct napi_struct *napi, int budget));
++
+ 	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
+ 
+ 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+@@ -472,6 +476,7 @@ struct mt76_rxwi_cache {
+ 	dma_addr_t dma_addr;
+ 
+ 	void *ptr;
++	u8 qid;
+ };
+ 
+ struct mt76_rx_tid {
+@@ -578,6 +583,10 @@ struct mt76_driver_ops {
+ 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+ 		       struct sk_buff *skb, u32 *info);
+ 
++	void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
++	int (*rx_rro_fill_msdu_pg)(struct mt76_dev *dev, struct mt76_queue *q,
++				   dma_addr_t p, void *data);
++
+ 	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+ 
+ 	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
+@@ -1306,6 +1315,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
+ #define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
+ #define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
++#define mt76_queue_rx_init(dev, ...)	(dev)->mt76.queue_ops->rx_init(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+ #define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
+@@ -1871,13 +1881,8 @@ static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
+ 
+ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+ {
+-	if (!(q->flags & MT_QFLAG_WED))
+-		return false;
+-
+-	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
+-	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q) ||
+-	       mt76_queue_is_wed_rro_msdu_pg(q);
+-
++	return (q->flags & MT_QFLAG_WED) &&
++	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+ }
+ 
+ struct mt76_txwi_cache *
+diff --git a/mt7996/dma.c b/mt7996/dma.c
+index bbc3814d..e4b0af29 100644
+--- a/mt7996/dma.c
++++ b/mt7996/dma.c
+@@ -300,7 +300,7 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
+ 	}
+ 
+ 	if (mt7996_band_valid(dev, MT_BAND2))
+-		irq_mask |= MT_INT_BAND2_RX_DONE;
++		irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
+ 
+ 	if (mtk_wed_device_active(wed) && wed_reset) {
+ 		u32 wed_irq_mask = irq_mask;
+@@ -465,7 +465,6 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
+ 	mt7996_dma_start(dev, reset, true);
+ }
+ 
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ {
+ 	struct mt76_dev *mdev = &dev->mt76;
+@@ -474,7 +473,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ 
+ 	/* ind cmd */
+ 	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
+-	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
++	if (mtk_wed_device_active(&mdev->mmio.wed) &&
++	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
++		mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ 	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
+ 			       MT_RXQ_ID(MT_RXQ_RRO_IND),
+ 			       MT7996_RX_RING_SIZE,
+@@ -485,7 +486,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ 	/* rx msdu page queue for band0 */
+ 	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
+ 		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
+-	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
++	if (mtk_wed_device_active(&mdev->mmio.wed) &&
++	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
++		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ 	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+ 			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+ 			       MT7996_RX_RING_SIZE,
+@@ -498,7 +501,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ 		/* rx msdu page queue for band1 */
+ 		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
+ 			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
+-		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
++		if (mtk_wed_device_active(&mdev->mmio.wed) &&
++		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
++			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ 		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+ 				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+ 				       MT7996_RX_RING_SIZE,
+@@ -512,7 +517,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ 		/* rx msdu page queue for band2 */
+ 		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
+ 			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
+-		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
++		if (mtk_wed_device_active(&mdev->mmio.wed) &&
++		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
++			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ 		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+ 				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+ 				       MT7996_RX_RING_SIZE,
+@@ -522,15 +529,37 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
+ 			return ret;
+ 	}
+ 
+-	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
+-		   MT_INT_TX_DONE_BAND2;
+-	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+-	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+-	mt7996_irq_enable(dev, irq_mask);
++	if (mtk_wed_device_active(&mdev->mmio.wed)) {
++		irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
++			   MT_INT_TX_DONE_BAND2;
++
++		if (mtk_wed_get_rx_capa(&mdev->mmio.wed))
++			irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
++
++		mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
++		mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
++		mt7996_irq_enable(dev, irq_mask);
++	} else {
++		if (is_mt7996(&dev->mt76)) {
++			mt76_queue_rx_init(dev, MT_RXQ_TXFREE_BAND0, mt76_dma_rx_poll);
++			mt76_queue_rx_init(dev, MT_RXQ_TXFREE_BAND2, mt76_dma_rx_poll);
++			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1, mt76_dma_rx_poll);
++			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2, mt76_dma_rx_poll);
++		}
++		mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
++		mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1, mt76_dma_rx_poll);
++		if (mt7996_band_valid(dev, MT_BAND2))
++			mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2, mt76_dma_rx_poll);
++		mt76_queue_rx_init(dev, MT_RXQ_RRO_IND, mt76_dma_rx_poll);
++		mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0, mt76_dma_rx_poll);
++
++		mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
++	}
+ 
+ 	return 0;
+ }
+-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+ 
+ int mt7996_dma_init(struct mt7996_dev *dev)
+ {
+@@ -691,12 +720,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 			return ret;
+ 	}
+ 
+-	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+-	    dev->has_rro) {
++	if (dev->has_rro) {
+ 		/* rx rro data queue for band0 */
+ 		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
+ 			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
+-		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
++		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
++			dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+ 				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+ 				       MT7996_RX_RING_SIZE,
+@@ -708,7 +737,8 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 		if (is_mt7992(&dev->mt76)) {
+ 			dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ 				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+-			dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
++			if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
++				dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ 			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ 					       MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ 					       MT7996_RX_RING_SIZE,
+@@ -718,9 +748,10 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 				return ret;
+ 		} else {
+ 			/* tx free notify event from WA for band0 */
+-			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+-			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+-
++			if (mtk_wed_device_active(wed)) {
++				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
++				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
++			}
+ 			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ 					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ 					       MT7996_RX_MCU_RING_SIZE,
+@@ -734,7 +765,8 @@ int mt7996_dma_init(struct mt7996_dev *dev)
+ 			/* rx rro data queue for band2 */
+ 			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
+ 				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+-			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
++			if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
++				dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ 			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+ 					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+ 					       MT7996_RX_RING_SIZE,
+@@ -811,6 +843,11 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
+ 		dev_info(dev->mt76.dev,"%s L1 SER rx queue clean up done.",
+ 			 wiphy_name(dev->mt76.hw->wiphy));
+ 
++	if (dev->has_rro && !mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++		mt7996_rro_msdu_pg_free(dev);
++		mt7996_rx_token_put(dev);
++	}
++
+ 	mt76_tx_status_check(&dev->mt76, true);
+ 
+ 	if (!force)
+diff --git a/mt7996/init.c b/mt7996/init.c
+index bad4b1b7..0e647356 100644
+--- a/mt7996/init.c
++++ b/mt7996/init.c
+@@ -931,7 +931,6 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
+ 
+ void mt7996_rro_hw_init(struct mt7996_dev *dev)
+ {
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 	u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ 	int i;
+@@ -939,6 +938,10 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
+ 	if (!dev->has_rro)
+ 		return;
+ 
++	INIT_LIST_HEAD(&dev->wed_rro.pg_addr_cache);
++	for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
++		INIT_LIST_HEAD(&dev->wed_rro.pg_hash_head[i]);
++
+ 	if (is_mt7992(&dev->mt76)) {
+ 		/* set emul 3.0 function */
+ 		mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+@@ -947,9 +950,6 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
+ 		mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
+ 			dev->wed_rro.addr_elem[0].phy_addr);
+ 	} else {
+-		INIT_LIST_HEAD(&dev->wed_rro.pg_addr_cache);
+-		for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++)
+-			INIT_LIST_HEAD(&dev->wed_rro.pg_hash_head[i]);
+ 
+ 		/* TODO: remove line after WM has set */
+ 		mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+@@ -972,18 +972,24 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
+ 		mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ 			MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+ 	}
+-	wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+-	if (is_mt7996(&dev->mt76))
+-		wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
+-	else
+-		wed->wlan.ind_cmd.particular_sid = 1;
+-	wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
+-	wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+-	wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+ 
+-	mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+-	mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+-		 MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
++	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
++		wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
++		if (is_mt7996(&dev->mt76))
++			wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
++		else
++			wed->wlan.ind_cmd.particular_sid = 1;
++		wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
++		wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
++		wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
++
++		mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
++		mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
++			MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
++	} else {
++		mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0);
++		mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, 0);
++	}
+ 
+ 	/* particular session configure */
+ 	/* use max session idx + 1 as particular session id */
+@@ -1012,12 +1018,10 @@ void mt7996_rro_hw_init(struct mt7996_dev *dev)
+ 	mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ 		MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+ 
+-#endif
+ }
+ 
+ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ {
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 	struct mt7996_wed_rro_addr *addr;
+ 	void *ptr;
+@@ -1026,9 +1030,6 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ 	if (!dev->has_rro)
+ 		return 0;
+ 
+-	if (!mtk_wed_device_active(wed))
+-		return 0;
+-
+ 	for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ 		ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ 					  MT7996_RRO_BA_BITMAP_CR_SIZE,
+@@ -1059,9 +1060,8 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ 			addr->signature = 0xff;
+ 			addr++;
+ 		}
+-
+-		wed->wlan.ind_cmd.addr_elem_phys[i] =
+-			dev->wed_rro.addr_elem[i].phy_addr;
++		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
++			wed->wlan.ind_cmd.addr_elem_phys[i] = dev->wed_rro.addr_elem[i].phy_addr;
+ 	}
+ 
+ 	for (i = 0; i < MT7996_RRO_MSDU_PG_CR_CNT; i++) {
+@@ -1093,22 +1093,15 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+ 	mt7996_rro_hw_init(dev);
+ 
+ 	return mt7996_dma_rro_init(dev);
+-#else
+-	return 0;
+-#endif
+ }
+ 
+ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
+ {
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ 	int i;
+ 
+ 	if (!dev->has_rro)
+ 		return;
+ 
+-	if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+-		return;
+-
+ 	for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ 		if (!dev->wed_rro.ba_bitmap[i].ptr)
+ 			continue;
+@@ -1138,12 +1131,10 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
+ 			   sizeof(struct mt7996_wed_rro_addr),
+ 			   dev->wed_rro.session.ptr,
+ 			   dev->wed_rro.session.phy_addr);
+-#endif
+ }
+ 
+ static void mt7996_wed_rro_work(struct work_struct *work)
+ {
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ 	struct mt7996_dev *dev;
+ 	LIST_HEAD(list);
+ 
+@@ -1186,7 +1177,6 @@ reset:
+ out:
+ 		kfree(e);
+ 	}
+-#endif
+ }
+ 
+ int mt7996_get_chip_sku(struct mt7996_dev *dev)
+@@ -1828,6 +1818,10 @@ void mt7996_unregister_device(struct mt7996_dev *dev)
+ 	mt7996_mcu_exit(dev);
+ 	mt7996_tx_token_put(dev);
+ 	mt7996_dma_cleanup(dev);
++	if (dev->has_rro && !mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++		mt7996_rro_msdu_pg_free(dev);
++		mt7996_rx_token_put(dev);
++	}
+ 	tasklet_disable(&dev->mt76.irq_tasklet);
+ 
+ 	mt76_free_device(&dev->mt76);
+diff --git a/mt7996/mac.c b/mt7996/mac.c
+index 27b4e99f..c0f282d1 100644
+--- a/mt7996/mac.c
++++ b/mt7996/mac.c
+@@ -1505,6 +1505,387 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 	}
+ }
+ 
++static struct mt7996_msdu_pg_addr *
++mt7996_alloc_pg_addr(struct mt7996_dev *dev)
++{
++	struct mt7996_msdu_pg_addr *p;
++	int size;
++
++	size = L1_CACHE_ALIGN(sizeof(*p));
++	p = kzalloc(size, GFP_ATOMIC);
++	if (!p)
++		return NULL;
++
++	INIT_LIST_HEAD(&p->list);
++
++	return p;
++}
++
++static struct mt7996_msdu_pg_addr *
++__mt7996_get_pg_addr(struct mt7996_dev *dev)
++{
++	struct mt7996_msdu_pg_addr *p = NULL;
++
++	spin_lock(&dev->wed_rro.lock);
++	if (!list_empty(&dev->wed_rro.pg_addr_cache)) {
++		p = list_first_entry(&dev->wed_rro.pg_addr_cache,
++				     struct mt7996_msdu_pg_addr,
++				     list);
++		if (p)
++			list_del(&p->list);
++	}
++	spin_unlock(&dev->wed_rro.lock);
++
++	return p;
++}
++
++struct mt7996_msdu_pg_addr *
++mt7996_get_pg_addr(struct mt7996_dev *dev)
++{
++	struct mt7996_msdu_pg_addr *p = __mt7996_get_pg_addr(dev);
++
++	if (p)
++		return p;
++
++	return mt7996_alloc_pg_addr(dev);
++}
++
++static void
++mt7996_put_pg_addr(struct mt7996_dev *dev,
++		struct mt7996_msdu_pg_addr *p)
++{
++	if (!p)
++		return;
++
++	if (p->buf) {
++		skb_free_frag(p->buf);
++		p->buf = NULL;
++	}
++
++	spin_lock(&dev->wed_rro.lock);
++	list_add(&p->list, &dev->wed_rro.pg_addr_cache);
++	spin_unlock(&dev->wed_rro.lock);
++}
++
++static void
++mt7996_free_pg_addr(struct mt7996_dev *dev)
++{
++	struct mt7996_msdu_pg_addr *pg_addr;
++
++	local_bh_disable();
++	while ((pg_addr = __mt7996_get_pg_addr(dev)) != NULL) {
++		if (pg_addr->buf) {
++			skb_free_frag(pg_addr->buf);
++			pg_addr->buf = NULL;
++		}
++		kfree(pg_addr);
++	}
++	local_bh_enable();
++}
++
++static u32
++mt7996_rro_msdu_pg_hash(dma_addr_t pa)
++{
++	u32 sum = 0;
++	u16 i = 0;
++
++	while (pa != 0) {
++		sum += (u32)((pa & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
++		pa >>= 8;
++		i += 13;
++	}
++
++	return sum % MT7996_RRO_MSDU_PG_HASH_SIZE;
++}
++
++static struct mt7996_msdu_pg_addr *
++mt7996_rro_msdu_pg_search(struct mt7996_dev *dev, dma_addr_t pa)
++{
++	struct mt7996_msdu_pg_addr *pg_addr, *tmp;
++	u32 hash_idx = mt7996_rro_msdu_pg_hash(pa);
++	struct list_head *head;
++	u8 found = 0;
++
++	spin_lock(&dev->wed_rro.lock);
++	head = &dev->wed_rro.pg_hash_head[hash_idx];
++	list_for_each_entry_safe(pg_addr, tmp, head, list) {
++		if (pg_addr->dma_addr == pa) {
++			list_del(&pg_addr->list);
++			found = 1;
++			break;
++		}
++	}
++	spin_unlock(&dev->wed_rro.lock);
++
++	return (found == 1) ? pg_addr : NULL;
++}
++
++void mt7996_rro_msdu_pg_free(struct mt7996_dev *dev)
++{
++	struct mt7996_msdu_pg_addr *pg_addr, *tmp;
++	struct list_head *head;
++	u32 i;
++
++	local_bh_disable();
++	for (i = 0; i < MT7996_RRO_MSDU_PG_HASH_SIZE; i++) {
++		head = &dev->wed_rro.pg_hash_head[i];
++		list_for_each_entry_safe(pg_addr, tmp, head, list) {
++			list_del_init(&pg_addr->list);
++			dma_unmap_single(dev->mt76.dma_dev, pg_addr->dma_addr,
++					 SKB_WITH_OVERHEAD(pg_addr->q->buf_size),
++					 DMA_FROM_DEVICE);
++			if (pg_addr->buf) {
++				skb_free_frag(pg_addr->buf);
++				pg_addr->buf = NULL;
++			}
++			kfree(pg_addr);
++		}
++	}
++	local_bh_enable();
++
++	mt7996_free_pg_addr(dev);
++
++	mt76_for_each_q_rx(&dev->mt76, i) {
++		struct mt76_queue *q = &dev->mt76.q_rx[i];
++		struct page *page;
++
++		if (mt76_queue_is_wed_rro_msdu_pg(q)) {
++			if (!q->rx_page.va)
++				continue;
++
++			page = virt_to_page(q->rx_page.va);
++			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
++			memset(&q->rx_page, 0, sizeof(q->rx_page));
++		}
++	}
++}
++
++void mt7996_rx_token_put(struct mt7996_dev *dev)
++{
++	struct mt76_queue *q;
++	struct page *page;
++	int i;
++
++	for (i = 0; i < dev->mt76.rx_token_size; i++) {
++		struct mt76_rxwi_cache *r;
++
++		r = mt76_rx_token_release(&dev->mt76, i);
++		if (!r || !r->ptr)
++			continue;
++
++		q = &dev->mt76.q_rx[r->qid];
++		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
++				 SKB_WITH_OVERHEAD(q->buf_size),
++				 DMA_FROM_DEVICE);
++		skb_free_frag(r->ptr);
++		r->dma_addr = 0;
++		r->ptr = NULL;
++
++		mt76_put_rxwi(&dev->mt76, r);
++	}
++
++	mt76_for_each_q_rx(&dev->mt76, i) {
++		struct mt76_queue *q = &dev->mt76.q_rx[i];
++
++		if (mt76_queue_is_wed_rro_data(q)) {
++			if (!q->rx_page.va)
++				continue;
++
++			page = virt_to_page(q->rx_page.va);
++			__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
++			memset(&q->rx_page, 0, sizeof(q->rx_page));
++		}
++	}
++
++	mt76_free_pending_rxwi(&dev->mt76);
++}
++
++int mt7996_rro_fill_msdu_page(struct mt76_dev *mdev, struct mt76_queue *q,
++			 dma_addr_t p, void *data)
++{
++	struct mt7996_msdu_pg_addr *pg_addr;
++	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
++	struct mt7996_msdu_pg *pg = data;
++	u32 hash_idx;
++
++	pg->owner = 1;
++	pg_addr = mt7996_get_pg_addr(dev);
++	if (!pg_addr)
++		return -ENOMEM;
++
++	pg_addr->buf = data;
++	pg_addr->dma_addr = p;
++	pg_addr->q = q;
++	hash_idx = mt7996_rro_msdu_pg_hash(pg_addr->dma_addr);
++
++	spin_lock(&dev->wed_rro.lock);
++	list_add_tail(&pg_addr->list,
++		      &dev->wed_rro.pg_hash_head[hash_idx]);
++	spin_unlock(&dev->wed_rro.lock);
++
++	return 0;
++}
++
++static struct mt7996_wed_rro_addr *
++mt7996_rro_get_addr_elem(struct mt7996_dev *dev, u16 seid, u16 sn)
++{
++	u32 idx;
++	void *addr;
++
++	if (seid == MT7996_RRO_MAX_SESSION) {
++		addr = dev->wed_rro.session.ptr;
++		idx = sn % MT7996_RRO_WINDOW_MAX_LEN;
++	} else {
++		addr = dev->wed_rro.addr_elem[seid / MT7996_RRO_BA_BITMAP_SESSION_SIZE].ptr;
++		idx = (seid % MT7996_RRO_BA_BITMAP_SESSION_SIZE) * MT7996_RRO_WINDOW_MAX_LEN
++			+ (sn % MT7996_RRO_WINDOW_MAX_LEN);
++	}
++	return addr + idx * sizeof(struct mt7996_wed_rro_addr);
++}
++
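++/* process one ind cmd: walk its SN range, resolve each addr element,
++ * chase the msdu page chain and rebuild skbs for the normal rx path */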
++void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
++{
++	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
++	struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
++	struct mt76_rxwi_cache *r;
++	struct mt76_rx_status *status;
++	struct mt76_queue *q;
++	struct mt7996_wed_rro_addr *elem;
++	struct mt7996_msdu_pg_addr *pg_addr = NULL;
++	struct mt7996_msdu_pg *pg = NULL;
++	struct mt7996_rro_hif *rxd;
++	struct sk_buff *skb;
++	dma_addr_t msdu_pg_pa;
++	int len, data_len, i, j, sn;
++	void *buf;
++	u8 more, qid;
++	u32 info = 0;
++
++	for (i = 0; i < cmd->ind_cnt; i++) {
++		sn = (cmd->start_sn + i) & GENMASK(11, 0);
++		elem = mt7996_rro_get_addr_elem(dev, cmd->se_id, sn);
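++		/* the signature records which SN window filled this element;
++		 * a mismatch means the slot is stale, so skip it */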
++		if (elem->signature != (sn / MT7996_RRO_WINDOW_MAX_LEN)) {
++			elem->signature = 0xff;
++			goto update_ack_sn;
++		}
++
++		msdu_pg_pa = elem->head_high;
++		msdu_pg_pa <<= 32;
++		msdu_pg_pa |= elem->head_low;
++
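++		/* walk the chained msdu pages; each page carries up to
++		 * MT7996_MAX_HIF_RXD_IN_PG rx descriptors */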
++		for (j = 0; j < elem->count; j++) {
++			if (pg_addr == NULL) {
++				pg_addr = mt7996_rro_msdu_pg_search(dev, msdu_pg_pa);
++
++				if (pg_addr == NULL) {
++					dev_info(mdev->dev, "pg_addr(%llx) search failed\n",
++						 msdu_pg_pa);
++					continue;
++				}
++
++				dma_unmap_single(mdev->dma_dev, pg_addr->dma_addr,
++						 SKB_WITH_OVERHEAD(pg_addr->q->buf_size),
++						 DMA_FROM_DEVICE);
++
++				pg = (struct mt7996_msdu_pg *)pg_addr->buf;
++			}
++
++			rxd = &pg->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
++			more = !rxd->ls;
++			len = rxd->sdl;
++
++			r = mt76_rx_token_release(mdev, rxd->rx_token_id);
++			if (!r)
++				goto next_page_chk;
++
++			qid = r->qid;
++			buf = r->ptr;
++			q = &mdev->q_rx[qid];
++			dma_unmap_single(mdev->dma_dev, r->dma_addr,
++					 SKB_WITH_OVERHEAD(q->buf_size),
++					 DMA_FROM_DEVICE);
++			r->dma_addr = 0;
++			r->ptr = NULL;
++			mt76_put_rxwi(mdev, r);
++			if (!buf)
++				goto next_page_chk;
++
++			if (q->rx_head)
++				data_len = q->buf_size;
++			else
++				data_len = SKB_WITH_OVERHEAD(q->buf_size);
++
++			if (data_len < len + q->buf_offset) {
++				dev_kfree_skb(q->rx_head);
++				skb_free_frag(buf);
++				q->rx_head = NULL;
++				goto next_page_chk;
++			}
++
++			if (q->rx_head) {
++				/* TODO: fragment error, skip handling */
++				//mt76_add_fragment(mdev, q, buf, len, more, info);
++				skb_free_frag(buf);
++				if (!more) {
++					dev_kfree_skb(q->rx_head);
++					q->rx_head = NULL;
++				}
++				goto next_page_chk;
++			}
++
++			if (!more && !mt7996_rx_check(mdev, buf, len))
++				goto next_page_chk;
++
++			skb = build_skb(buf, q->buf_size);
++			if (!skb)
++				goto next_page_chk;
++
++			skb_reserve(skb, q->buf_offset);
++			__skb_put(skb, len);
++
++			if (cmd->ind_reason == 1 || cmd->ind_reason == 2) {
++				dev_kfree_skb(skb);
++				goto next_page_chk;
++			}
++
++			if (more) {
++				q->rx_head = skb;
++				goto next_page_chk;
++			}
++
++			status = (struct mt76_rx_status *)skb->cb;
++			if (cmd->se_id != MT7996_RRO_MAX_SESSION)
++				status->aggr = true;
++
++			mt7996_queue_rx_skb(mdev, qid, skb, &info);
++
++next_page_chk:
++			if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
++				msdu_pg_pa = pg->next_pg_h;
++				msdu_pg_pa <<= 32;
++				msdu_pg_pa |= pg->next_pg_l;
++				mt7996_put_pg_addr(dev, pg_addr);
++				pg_addr = NULL;
++			}
++		}
++update_ack_sn:
++		if ((i + 1) % 4 == 0)
++			mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
++				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, cmd->se_id) |
++				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, sn));
++		if (pg_addr) {
++			mt7996_put_pg_addr(dev, pg_addr);
++			pg_addr = NULL;
++		}
++	}
++
++	/* update ack_sn for remaining addr_elem */
++	if (i % 4 != 0)
++		mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
++			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, cmd->se_id) |
++			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, sn));
++}
++
+ void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
+ {
+ 	struct mt7996_dev *dev = phy->dev;
+@@ -2040,6 +2421,9 @@ void mt7996_mac_reset_work(struct work_struct *work)
+ 	dev_info(dev->mt76.dev,"%s L1 SER dma start done.",
+ 		 wiphy_name(dev->mt76.hw->wiphy));
+ 
++	if (is_mt7992(&dev->mt76) && dev->has_rro)
++		mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
++
+ 	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ 		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
+ 				   dev->mt76.mmio.irqmask;
+@@ -2049,10 +2433,6 @@ void mt7996_mac_reset_work(struct work_struct *work)
+ 
+ 		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ 
+-		if (is_mt7992(&dev->mt76) && dev->has_rro)
+-			mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+-				MT_RRO_3_0_EMU_CONF_EN_MASK);
+-
+ 		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ 					    true);
+ 
+diff --git a/mt7996/mmio.c b/mt7996/mmio.c
+index 58db5204..4d9cb9ff 100644
+--- a/mt7996/mmio.c
++++ b/mt7996/mmio.c
+@@ -654,6 +654,8 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
+ 		.rx_skb = mt7996_queue_rx_skb,
+ 		.rx_check = mt7996_rx_check,
+ 		.rx_poll_complete = mt7996_rx_poll_complete,
++		.rx_rro_ind_process = mt7996_rro_rx_process,
++		.rx_rro_fill_msdu_pg = mt7996_rro_fill_msdu_page,
+ 		.sta_add = mt7996_mac_sta_add,
+ 		.sta_assoc = mt7996_mac_sta_assoc,
+ 		.sta_remove = mt7996_mac_sta_remove,
+diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
+index 65a88e58..3aedaf5a 100644
+--- a/mt7996/mt7996.h
++++ b/mt7996/mt7996.h
+@@ -102,6 +102,7 @@
+ 
+ #define MT7996_BUILD_TIME_LEN		24
+ 
++#define MT7996_MAX_HIF_RXD_IN_PG	5
+ #define MT7996_RRO_MSDU_PG_HASH_SIZE	127
+ #define MT7996_RRO_MAX_SESSION		1024
+ #define MT7996_RRO_WINDOW_MAX_LEN	1024
+@@ -532,6 +533,33 @@ int mt7996_mcu_set_muru_qos_cfg(struct mt7996_dev *dev, u16 wlan_idx, u8 dir,
+ 				u8 scs_id, u8 req_type, u8 *qos_ie, u8 qos_ie_len);
+ #endif
+ 
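++/* HW RRO rx descriptor: one per received buffer, stored in an msdu page */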
++struct mt7996_rro_hif {
++	u32 rx_blk_base_l;
++	u32 rx_blk_base_h: 4;
++	u32 eth_hdr_ofst : 7;
++	u32 rsv          : 1;
++	u32 ring_no      : 2;
++	u32 dst_sel      : 2;
++	u32 sdl          :14;
++	u32 ls           : 1;
++	u32 rsv2         : 1;
++	u32 pn_31_0;
++	u32 pn_47_32     :16;
++	u32 cs_status    : 4;
++	u32 cs_type      : 4;
++	u32 c            : 1;
++	u32 f            : 1;
++	u32 un           : 1;
++	u32 rsv3         : 1;
++	u32 is_fc_data   : 1;
++	u32 uc           : 1;
++	u32 mc           : 1;
++	u32 bc           : 1;
++	u16 rx_token_id;
++	u16 rsv4;
++	u32 rsv5;
++};
++
+ struct mt7996_rro_ba_session {
+ 	u32 ack_sn         :12;
+ 	u32 win_sz         :3;
+@@ -547,6 +575,26 @@ struct mt7996_rro_ba_session {
+ 	u32 last_in_rxtime :12;
+ };
+ 
++struct mt7996_rro_ba_session_elem {
++	struct list_head poll_list;
++	u16 session_id;
++};
++
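++/* msdu page: HW-filled block of rx descriptors, chained via next_pg_l/h */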
++struct mt7996_msdu_pg {
++	struct mt7996_rro_hif rxd[MT7996_MAX_HIF_RXD_IN_PG];
++	u32 next_pg_l;
++	u32 next_pg_h	: 4;
++	u32 rsv		:27;
++	u32 owner	: 1;
++};
++
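++/* host-side bookkeeping for one mapped msdu page, hashed by DMA address */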
++struct mt7996_msdu_pg_addr {
++	struct list_head list;
++	dma_addr_t dma_addr;
++	struct mt76_queue *q;
++	void *buf;
++};
++
+ struct mt7996_chanctx {
+ 	struct cfg80211_chan_def chandef;
+ 	struct mt7996_phy *phy;
+@@ -1257,6 +1305,11 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ void mt7996_tx_token_put(struct mt7996_dev *dev);
+ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 			 struct sk_buff *skb, u32 *info);
++void mt7996_rx_token_put(struct mt7996_dev *dev);
++void mt7996_rro_msdu_pg_free(struct mt7996_dev *dev);
++void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data);
++int mt7996_rro_fill_msdu_page(struct mt76_dev *mdev, struct mt76_queue *q,
++			      dma_addr_t p, void *data);
+ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
+ void mt7996_stats_work(struct work_struct *work);
+ void mt7996_scan_work(struct work_struct *work);
+diff --git a/mt7996/pci.c b/mt7996/pci.c
+index 382b6a89..a010680f 100644
+--- a/mt7996/pci.c
++++ b/mt7996/pci.c
+@@ -13,6 +13,9 @@
+ static bool hif2_enable = false;
+ module_param(hif2_enable, bool, 0644);
+ 
++static bool rro_enable = false;
++module_param(rro_enable, bool, 0644);
++
+ static LIST_HEAD(hif_list);
+ static DEFINE_SPINLOCK(hif_lock);
+ static u32 hif_idx;
+@@ -140,6 +143,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
+ 	if (IS_ERR(dev))
+ 		return PTR_ERR(dev);
+ 
++	dev->has_rro = rro_enable;
+ 	mdev = &dev->mt76;
+ 	mt7996_wfsys_reset(dev);
+ 	hif2 = mt7996_pci_init_hif2(pdev);
+diff --git a/mt7996/regs.h b/mt7996/regs.h
+index e1893517..0ea055ba 100644
+--- a/mt7996/regs.h
++++ b/mt7996/regs.h
+@@ -561,6 +561,7 @@ enum offs_rev {
+ #define MT_INT_RRO_RX_DONE			(MT_INT_RX(MT_RXQ_RRO_BAND0) |		\
+ 						 MT_INT_RX(MT_RXQ_RRO_BAND1) |		\
+ 						 MT_INT_RX(MT_RXQ_RRO_BAND2) |		\
++						 MT_INT_RX(MT_RXQ_RRO_IND) |		\
+ 						 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) |	\
+ 						 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) |	\
+ 						 MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
+-- 
+2.18.0
+