[][Fix build error by removing WED patch]

[Description]
Fix the build error by removing the WED support patch.

[Release-log]
N/A

Change-Id: Iba9e06435680a45e1bb1b6037e419b617a6bad83
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/5845110
diff --git a/autobuild_mac80211_release/package/kernel/mt76/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch b/autobuild_mac80211_release/package/kernel/mt76/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch
new file mode 100644
index 0000000..dd1e092
--- /dev/null
+++ b/autobuild_mac80211_release/package/kernel/mt76/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch
@@ -0,0 +1,1117 @@
+From f00aa90bc4a1b77e1b0c884b24a71c1b49a94ee6 Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Tue, 12 Apr 2022 15:58:28 +0800
+Subject: [PATCH] mt76:remove WED support patch for build err
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ dma.c           | 160 ++++++++++--------------------------------------
+ mac80211.c      |   4 +-
+ mmio.c          |   9 +--
+ mt76.h          |  25 ++------
+ mt7603/dma.c    |   8 +--
+ mt7615/dma.c    |   6 +-
+ mt76x02_mmio.c  |   4 +-
+ mt7915/dma.c    |  43 ++-----------
+ mt7915/mac.c    | 129 ++++++--------------------------------
+ mt7915/mac.h    |   2 -
+ mt7915/main.c   |  36 -----------
+ mt7915/mcu.c    |   3 -
+ mt7915/mmio.c   |  29 +++------
+ mt7915/mt7915.h |   2 -
+ mt7915/pci.c    |  96 +++--------------------------
+ mt7915/regs.h   |  17 +----
+ mt7921/dma.c    |   2 +-
+ tx.c            |  16 +----
+ 18 files changed, 94 insertions(+), 497 deletions(-)
+
+diff --git a/dma.c b/dma.c
+index 30de8be4..83787fc0 100644
+--- a/dma.c
++++ b/dma.c
+@@ -7,36 +7,9 @@
+ #include "mt76.h"
+ #include "dma.h"
+ 
+-#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
+-
+-#define Q_READ(_dev, _q, _field) ({					\
+-	u32 _offset = offsetof(struct mt76_queue_regs, _field);		\
+-	u32 _val;							\
+-	if ((_q)->flags & MT_QFLAG_WED)					\
+-		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
+-					       ((_q)->wed_regs +	\
+-					        _offset));		\
+-	else								\
+-		_val = readl(&(_q)->regs->_field);			\
+-	_val;								\
+-})
+-
+-#define Q_WRITE(_dev, _q, _field, _val)	do {				\
+-	u32 _offset = offsetof(struct mt76_queue_regs, _field);		\
+-	if ((_q)->flags & MT_QFLAG_WED)					\
+-		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
+-					 ((_q)->wed_regs + _offset),	\
+-					 _val);				\
+-	else								\
+-		writel(_val, &(_q)->regs->_field);			\
+-} while (0)
+-
+-#else
+-
+-#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
+-#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)
++#define Q_READ(_dev, _q, _field)		readl(&(_q)->regs->_field)
++#define Q_WRITE(_dev, _q, _field, _val)		writel(_val, &(_q)->regs->_field)
+ 
+-#endif
+ 
+ static struct mt76_txwi_cache *
+ mt76_alloc_txwi(struct mt76_dev *dev)
+@@ -138,6 +111,36 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+ 	mt76_dma_sync_idx(dev, q);
+ }
+ 
++static int
++mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
++		     int idx, int n_desc, int bufsize,
++		     u32 ring_base)
++{
++	int size;
++
++	spin_lock_init(&q->lock);
++	spin_lock_init(&q->cleanup_lock);
++
++	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
++	q->ndesc = n_desc;
++	q->buf_size = bufsize;
++	q->hw_idx = idx;
++
++	size = q->ndesc * sizeof(struct mt76_desc);
++	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
++	if (!q->desc)
++		return -ENOMEM;
++
++	size = q->ndesc * sizeof(*q->entry);
++	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
++	if (!q->entry)
++		return -ENOMEM;
++
++	mt76_dma_queue_reset(dev, q);
++
++	return 0;
++}
++
+ static int
+ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
+@@ -483,85 +486,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+ 	return frames;
+ }
+ 
+-static int
+-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+-{
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-	struct mtk_wed_device *wed = &dev->mmio.wed;
+-	int ret, type, ring;
+-	u8 flags = q->flags;
+-
+-	if (!mtk_wed_device_active(wed))
+-		q->flags &= ~MT_QFLAG_WED;
+-
+-	if (!(q->flags & MT_QFLAG_WED))
+-		return 0;
+-
+-	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+-	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+-
+-	switch (type) {
+-	case MT76_WED_Q_TX:
+-		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+-		if (!ret)
+-			q->wed_regs = wed->tx_ring[ring].reg_base;
+-		break;
+-	case MT76_WED_Q_TXFREE:
+-		/* WED txfree queue needs ring to be initialized before setup */
+-		q->flags = 0;
+-		mt76_dma_queue_reset(dev, q);
+-		mt76_dma_rx_fill(dev, q);
+-		q->flags = flags;
+-
+-		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+-		if (!ret)
+-			q->wed_regs = wed->txfree_ring.reg_base;
+-		break;
+-	default:
+-		ret = -EINVAL;
+-	}
+-
+-	return ret;
+-#else
+-	return 0;
+-#endif
+-}
+-
+-static int
+-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+-		     int idx, int n_desc, int bufsize,
+-		     u32 ring_base)
+-{
+-	int ret, size;
+-
+-	spin_lock_init(&q->lock);
+-	spin_lock_init(&q->cleanup_lock);
+-
+-	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+-	q->ndesc = n_desc;
+-	q->buf_size = bufsize;
+-	q->hw_idx = idx;
+-
+-	size = q->ndesc * sizeof(struct mt76_desc);
+-	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+-	if (!q->desc)
+-		return -ENOMEM;
+-
+-	size = q->ndesc * sizeof(*q->entry);
+-	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+-	if (!q->entry)
+-		return -ENOMEM;
+-
+-	ret = mt76_dma_wed_setup(dev, q);
+-	if (ret)
+-		return ret;
+-
+-	if (q->flags != MT_WED_Q_TXFREE)
+-		mt76_dma_queue_reset(dev, q);
+-
+-	return 0;
+-}
+-
+ static void
+ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ {
+@@ -643,29 +567,14 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ static int
+ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ {
+-	int len, data_len, done = 0, dma_idx;
++	int len, data_len, done = 0;
+ 	struct sk_buff *skb;
+ 	unsigned char *data;
+-	bool check_ddone = false;
+ 	bool more;
+ 
+-	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+-	    q->flags == MT_WED_Q_TXFREE) {
+-		dma_idx = Q_READ(dev, q, dma_idx);
+-		check_ddone = true;
+-	}
+-
+ 	while (done < budget) {
+ 		u32 info;
+ 
+-		if (check_ddone) {
+-			if (q->tail == dma_idx)
+-				dma_idx = Q_READ(dev, q, dma_idx);
+-
+-			if (q->tail == dma_idx)
+-				break;
+-		}
+-
+ 		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+ 		if (!data)
+ 			break;
+@@ -806,8 +715,5 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
+ 	}
+ 
+ 	mt76_free_pending_txwi(dev);
+-
+-	if (mtk_wed_device_active(&dev->mmio.wed))
+-		mtk_wed_device_detach(&dev->mmio.wed);
+ }
+ EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
+diff --git a/mac80211.c b/mac80211.c
+index 21eaf994..ceab8412 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -1596,7 +1596,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
+ 
+ struct mt76_queue *
+ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+-		int ring_base, u32 flags)
++		int ring_base)
+ {
+ 	struct mt76_queue *hwq;
+ 	int err;
+@@ -1605,8 +1605,6 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+ 	if (!hwq)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	hwq->flags = flags;
+-
+ 	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
+ 	if (err < 0)
+ 		return ERR_PTR(err);
+diff --git a/mmio.c b/mmio.c
+index 86e3d2ac..26353b6b 100644
+--- a/mmio.c
++++ b/mmio.c
+@@ -73,13 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
+ 	spin_lock_irqsave(&dev->mmio.irq_lock, flags);
+ 	dev->mmio.irqmask &= ~clear;
+ 	dev->mmio.irqmask |= set;
+-	if (addr) {
+-		if (mtk_wed_device_active(&dev->mmio.wed))
+-			mtk_wed_device_irq_set_mask(&dev->mmio.wed,
+-						    dev->mmio.irqmask);
+-		else
+-			mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+-	}
++	if (addr)
++		mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ 	spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+diff --git a/mt76.h b/mt76.h
+index 768880f0..31671387 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -13,7 +13,6 @@
+ #include <linux/leds.h>
+ #include <linux/usb.h>
+ #include <linux/average.h>
+-#include <linux/soc/mediatek/mtk_wed.h>
+ #include <net/mac80211.h>
+ #include "util.h"
+ #include "testmode.h"
+@@ -27,16 +26,6 @@
+ 
+ #define MT76_TOKEN_FREE_THR	64
+ 
+-#define MT_QFLAG_WED_RING	GENMASK(1, 0)
+-#define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
+-#define MT_QFLAG_WED		BIT(4)
+-
+-#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
+-				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+-				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
+-#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
+-#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+-
+ struct mt76_dev;
+ struct mt76_phy;
+ struct mt76_wcid;
+@@ -186,9 +175,6 @@ struct mt76_queue {
+ 	u8 buf_offset;
+ 	u8 hw_idx;
+ 	u8 qid;
+-	u8 flags;
+-
+-	u32 wed_regs;
+ 
+ 	dma_addr_t desc_dma;
+ 	struct sk_buff *rx_head;
+@@ -556,8 +542,6 @@ struct mt76_mmio {
+ 	void __iomem *regs;
+ 	spinlock_t irq_lock;
+ 	u32 irqmask;
+-
+-	struct mtk_wed_device wed;
+ };
+ 
+ struct mt76_rx_status {
+@@ -785,7 +769,6 @@ struct mt76_dev {
+ 
+ 	spinlock_t token_lock;
+ 	struct idr token;
+-	u16 wed_token_count;
+ 	u16 token_count;
+ 	u16 token_size;
+ 
+@@ -1011,14 +994,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
+ 
+ struct mt76_queue *
+ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+-		int ring_base, u32 flags);
++		int ring_base);
+ u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
+ static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
+-				     int n_desc, int ring_base, u32 flags)
++				     int n_desc, int ring_base)
+ {
+ 	struct mt76_queue *q;
+ 
+-	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
++	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
+ 	if (IS_ERR(q))
+ 		return PTR_ERR(q);
+ 
+@@ -1033,7 +1016,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
+ {
+ 	struct mt76_queue *q;
+ 
+-	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
++	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
+ 	if (IS_ERR(q))
+ 		return PTR_ERR(q);
+ 
+diff --git a/mt7603/dma.c b/mt7603/dma.c
+index 590cff9d..37b092e3 100644
+--- a/mt7603/dma.c
++++ b/mt7603/dma.c
+@@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ 		ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
+-					 MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
++					 MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
+-				 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
++				 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
+ 		return ret;
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
+-				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
++				 MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
+-				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
++				 MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/mt7615/dma.c b/mt7615/dma.c
+index 3a79a2d4..00aefea1 100644
+--- a/mt7615/dma.c
++++ b/mt7615/dma.c
+@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
+ 	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ 		ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
+ 					 MT7615_TX_RING_SIZE / 2,
+-					 MT_TX_RING_BASE, 0);
++					 MT_TX_RING_BASE);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
+ 				 MT7615_TX_MGMT_RING_SIZE,
+-				 MT_TX_RING_BASE, 0);
++				 MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
+ 		return mt7622_init_tx_queues_multi(dev);
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
+-				 MT_TX_RING_BASE, 0);
++				 MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/mt76x02_mmio.c b/mt76x02_mmio.c
+index 0fa3c7c3..8bcd8afa 100644
+--- a/mt76x02_mmio.c
++++ b/mt76x02_mmio.c
+@@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
+ 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ 		ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
+ 					 MT76x02_TX_RING_SIZE,
+-					 MT_TX_RING_BASE, 0);
++					 MT_TX_RING_BASE);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+ 	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
+-				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
++				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/mt7915/dma.c b/mt7915/dma.c
+index 219b440f..60b4368a 100644
+--- a/mt7915/dma.c
++++ b/mt7915/dma.c
+@@ -8,16 +8,9 @@
+ static int
+ mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
+ {
+-	struct mt7915_dev *dev = phy->dev;
+ 	int i, err;
+ 
+-	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+-		ring_base = MT_WED_TX_RING_BASE;
+-		idx -= MT_TXQ_ID(0);
+-	}
+-
+-	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
+-				 MT_WED_Q_TX(idx));
++	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -326,14 +319,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
+ 	if (dev->dbdc_support || dev->phy.band_idx)
+ 		irq_mask |= MT_INT_BAND1_RX_DONE;
+ 
+-	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+-		u32 wed_irq_mask = irq_mask;
+-
+-		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+-		mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+-		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+-	}
+-
+ 	mt7915_irq_enable(dev, irq_mask);
+ 
+ 	return 0;
+@@ -342,7 +327,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
+ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ {
+ 	struct mt76_dev *mdev = &dev->mt76;
+-	u32 wa_rx_base, wa_rx_idx;
+ 	u32 hif1_ofs = 0;
+ 	int ret;
+ 
+@@ -355,17 +339,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ 
+ 	mt7915_dma_disable(dev, true);
+ 
+-	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+-		mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+-
+-		mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+-			FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+-			FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+-			FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
+-	} else {
+-		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+-	}
+-
+ 	/* init tx queue */
+ 	ret = mt7915_init_tx_queues(&dev->phy,
+ 				    MT_TXQ_ID(dev->phy.band_idx),
+@@ -417,17 +390,11 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ 		return ret;
+ 
+ 	/* event from WA */
+-	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+-		wa_rx_base = MT_WED_RX_RING_BASE;
+-		wa_rx_idx = MT7915_RXQ_MCU_WA;
+-		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+-	} else {
+-		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
+-		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
+-	}
+ 	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+-			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+-			       MT_RX_BUF_SIZE, wa_rx_base);
++			       MT_RXQ_ID(MT_RXQ_MCU_WA),
++			       MT7915_RX_MCU_RING_SIZE,
++			       MT_RX_BUF_SIZE,
++			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/mt7915/mac.c b/mt7915/mac.c
+index 8df38a23..8b0b90da 100644
+--- a/mt7915/mac.c
++++ b/mt7915/mac.c
+@@ -1379,29 +1379,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	return 0;
+ }
+ 
+-u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+-{
+-	struct mt7915_txp *txp = ptr + MT_TXD_SIZE;
+-	__le32 *txwi = ptr;
+-	u32 val;
+-
+-	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+-
+-	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+-	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+-	txwi[0] = cpu_to_le32(val);
+-
+-	val = MT_TXD1_LONG_FORMAT |
+-	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+-	txwi[1] = cpu_to_le32(val);
+-
+-	txp->token = cpu_to_le16(token_id);
+-	txp->nbuf = 1;
+-	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+-
+-	return MT_TXD_SIZE + sizeof(*txp);
+-}
+-
+ static void
+ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+ {
+@@ -1435,7 +1412,7 @@ mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ 
+ 	txp = mt7915_txwi_to_txp(dev, t);
+ 	for (i = 0; i < txp->nbuf; i++)
+-		dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]),
++		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+ }
+ 
+@@ -1444,7 +1421,6 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
+ 		 struct ieee80211_sta *sta, struct list_head *free_list)
+ {
+ 	struct mt76_dev *mdev = &dev->mt76;
+-	struct mt7915_sta *msta;
+ 	struct mt76_wcid *wcid;
+ 	__le32 *txwi;
+ 	u16 wcid_idx;
+@@ -1457,24 +1433,13 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
+ 	if (sta) {
+ 		wcid = (struct mt76_wcid *)sta->drv_priv;
+ 		wcid_idx = wcid->idx;
++
++		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
++			mt7915_tx_check_aggr(sta, txwi);
+ 	} else {
+ 		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+-		wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
+-
+-		if (wcid && wcid->sta) {
+-			msta = container_of(wcid, struct mt7915_sta, wcid);
+-			sta = container_of((void *)msta, struct ieee80211_sta,
+-					  drv_priv);
+-			spin_lock_bh(&dev->sta_poll_lock);
+-			if (list_empty(&msta->poll_list))
+-				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+-			spin_unlock_bh(&dev->sta_poll_lock);
+-		}
+ 	}
+ 
+-	if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+-		mt7915_tx_check_aggr(sta, txwi);
+-
+ 	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+ 
+ out:
+@@ -1482,55 +1447,29 @@ out:
+ 	mt76_put_txwi(mdev, t);
+ }
+ 
+-static void
+-mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
+-{
+-	struct mt76_dev *mdev = &dev->mt76;
+-	struct mt76_phy *mphy_ext = mdev->phy2;
+-
+-	/* clean DMA queues and unmap buffers first */
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+-	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+-	if (mphy_ext) {
+-		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
+-		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
+-	}
+-}
+-
+-static void
+-mt7915_mac_tx_free_done(struct mt7915_dev *dev,
+-			struct list_head *free_list, bool wake)
+-{
+-	struct sk_buff *skb, *tmp;
+-
+-	mt7915_mac_sta_poll(dev);
+-
+-	if (wake)
+-		mt76_set_tx_blocked(&dev->mt76, false);
+-
+-	mt76_worker_schedule(&dev->mt76.tx_worker);
+-
+-	list_for_each_entry_safe(skb, tmp, free_list, list) {
+-		skb_list_del_init(skb);
+-		napi_consume_skb(skb, 1);
+-	}
+-}
+-
+ static void
+ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+ {
+ 	struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
+ 	struct mt76_dev *mdev = &dev->mt76;
++	struct mt76_phy *mphy_ext = mdev->phy2;
+ 	struct mt76_txwi_cache *txwi;
+ 	struct ieee80211_sta *sta = NULL;
+ 	LIST_HEAD(free_list);
++	struct sk_buff *skb, *tmp;
+ 	void *end = data + len;
+ 	bool v3, wake = false;
+ 	u16 total, count = 0;
+ 	u32 txd = le32_to_cpu(free->txd);
+ 	__le32 *cur_info;
+ 
+-	mt7915_mac_tx_free_prepare(dev);
++	/* clean DMA queues and unmap buffers first */
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
++	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
++	if (mphy_ext) {
++		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
++		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
++	}
+ 
+ 	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ 	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+@@ -1587,38 +1526,17 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+ 		}
+ 	}
+ 
+-	mt7915_mac_tx_free_done(dev, &free_list, wake);
+-}
+-
+-static void
+-mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
+-{
+-	struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
+-	struct mt76_dev *mdev = &dev->mt76;
+-	__le16 *info = (__le16 *)free->info;
+-	void *end = data + len;
+-	LIST_HEAD(free_list);
+-	bool wake = false;
+-	u8 i, count;
+-
+-	mt7915_mac_tx_free_prepare(dev);
+-
+-	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
+-	if (WARN_ON_ONCE((void *)&info[count] > end))
+-		return;
++	mt7915_mac_sta_poll(dev);
+ 
+-	for (i = 0; i < count; i++) {
+-		struct mt76_txwi_cache *txwi;
+-		u16 msdu = le16_to_cpu(info[i]);
++	if (wake)
++		mt76_set_tx_blocked(&dev->mt76, false);
+ 
+-		txwi = mt76_token_release(mdev, msdu, &wake);
+-		if (!txwi)
+-			continue;
++	mt76_worker_schedule(&dev->mt76.tx_worker);
+ 
+-		mt7915_txwi_free(dev, txwi, NULL, &free_list);
++	list_for_each_entry_safe(skb, tmp, &free_list, list) {
++		skb_list_del_init(skb);
++		napi_consume_skb(skb, 1);
+ 	}
+-
+-	mt7915_mac_tx_free_done(dev, &free_list, wake);
+ }
+ 
+ static bool
+@@ -1798,9 +1716,6 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
+ 	case PKT_TYPE_TXRX_NOTIFY:
+ 		mt7915_mac_tx_free(dev, data, len);
+ 		return false;
+-	case PKT_TYPE_TXRX_NOTIFY_V0:
+-		mt7915_mac_tx_free_v0(dev, data, len);
+-		return false;
+ 	case PKT_TYPE_TXS:
+ 		for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ 		    mt7915_mac_add_txs(dev, rxd);
+@@ -1828,10 +1743,6 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 		mt7915_mac_tx_free(dev, skb->data, skb->len);
+ 		napi_consume_skb(skb, 1);
+ 		break;
+-	case PKT_TYPE_TXRX_NOTIFY_V0:
+-		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
+-		napi_consume_skb(skb, 1);
+-		break;
+ 	case PKT_TYPE_RX_EVENT:
+ 		mt7915_mcu_rx_event(dev, skb);
+ 		break;
+diff --git a/mt7915/mac.h b/mt7915/mac.h
+index c5fd1a61..5add1dd3 100644
+--- a/mt7915/mac.h
++++ b/mt7915/mac.h
+@@ -24,7 +24,6 @@ enum rx_pkt_type {
+ 	PKT_TYPE_TXRX_NOTIFY,
+ 	PKT_TYPE_RX_EVENT,
+ 	PKT_TYPE_RX_FW_MONITOR = 0x0c,
+-	PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
+ };
+ 
+ /* RXD DW1 */
+@@ -312,7 +311,6 @@ struct mt7915_tx_free {
+ 
+ #define MT_TX_FREE_VER			GENMASK(18, 16)
+ #define MT_TX_FREE_MSDU_CNT		GENMASK(9, 0)
+-#define MT_TX_FREE_MSDU_CNT_V0	GENMASK(6, 0)
+ #define MT_TX_FREE_WLAN_ID		GENMASK(23, 14)
+ #define MT_TX_FREE_LATENCY		GENMASK(12, 0)
+ /* 0: success, others: dropped */
+diff --git a/mt7915/main.c b/mt7915/main.c
+index 23f89379..09659dc7 100644
+--- a/mt7915/main.c
++++ b/mt7915/main.c
+@@ -1414,39 +1414,6 @@ out:
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-static int
+-mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+-			     struct ieee80211_vif *vif,
+-			     struct ieee80211_sta *sta,
+-			     struct net_device_path_ctx *ctx,
+-			     struct net_device_path *path)
+-{
+-	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+-	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+-	struct mt7915_dev *dev = mt7915_hw_dev(hw);
+-	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+-
+-	if (!mtk_wed_device_active(wed))
+-		return -ENODEV;
+-
+-	if (msta->wcid.idx > 0xff)
+-		return -EIO;
+-
+-	path->type = DEV_PATH_MTK_WDMA;
+-	path->dev = ctx->dev;
+-	path->mtk_wdma.wdma_idx = wed->wdma_idx;
+-	path->mtk_wdma.bss = mvif->mt76.idx;
+-	path->mtk_wdma.wcid = msta->wcid.idx;
+-	path->mtk_wdma.queue = phy != &dev->phy;
+-
+-	ctx->dev = NULL;
+-
+-	return 0;
+-}
+-#endif
+-
+ const struct ieee80211_ops mt7915_ops = {
+ 	.tx = mt7915_tx,
+ 	.start = mt7915_start,
+@@ -1494,7 +1461,4 @@ const struct ieee80211_ops mt7915_ops = {
+ 	.sta_add_debugfs = mt7915_sta_add_debugfs,
+ #endif
+ 	.set_radar_background = mt7915_set_radar_background,
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-	.net_fill_forward_path = mt7915_net_fill_forward_path,
+-#endif
+ };
+diff --git a/mt7915/mcu.c b/mt7915/mcu.c
+index 8d7e7422..afdbb5aa 100644
+--- a/mt7915/mcu.c
++++ b/mt7915/mcu.c
+@@ -2527,9 +2527,6 @@ int mt7915_run_firmware(struct mt7915_dev *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+-		mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
+-
+ 	ret = mt7915_mcu_set_mwds(dev, 1);
+ 	if (ret)
+ 		return ret;
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+index f07b0565..561ac650 100644
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -555,21 +555,15 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ static void mt7915_irq_tasklet(struct tasklet_struct *t)
+ {
+ 	struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 	u32 intr, intr1, mask;
+ 
+-	if (mtk_wed_device_active(wed)) {
+-		mtk_wed_device_irq_set_mask(wed, 0);
+-		intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+-	} else {
+-		mt76_wr(dev, MT_INT_MASK_CSR, 0);
+-		if (dev->hif2)
+-			mt76_wr(dev, MT_INT1_MASK_CSR, 0);
++	mt76_wr(dev, MT_INT_MASK_CSR, 0);
++	if (dev->hif2)
++		mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ 
+-		intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+-		intr &= dev->mt76.mmio.irqmask;
+-		mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+-	}
++	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
++	intr &= dev->mt76.mmio.irqmask;
++	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ 
+ 	if (dev->hif2) {
+ 		intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+@@ -624,15 +618,10 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
+ irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+ {
+ 	struct mt7915_dev *dev = dev_instance;
+-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 
+-	if (mtk_wed_device_active(wed)) {
+-		mtk_wed_device_irq_set_mask(wed, 0);
+-	} else {
+-		mt76_wr(dev, MT_INT_MASK_CSR, 0);
+-		if (dev->hif2)
+-			mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+-	}
++	mt76_wr(dev, MT_INT_MASK_CSR, 0);
++	if (dev->hif2)
++		mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ 
+ 	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ 		return IRQ_NONE;
+diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
+index b7838385..454232ba 100644
+--- a/mt7915/mt7915.h
++++ b/mt7915/mt7915.h
+@@ -517,8 +517,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void mt7915_wfsys_reset(struct mt7915_dev *dev);
+ irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
+ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
+-u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+-
+ int mt7915_register_device(struct mt7915_dev *dev);
+ void mt7915_unregister_device(struct mt7915_dev *dev);
+ int mt7915_eeprom_init(struct mt7915_dev *dev);
+diff --git a/mt7915/pci.c b/mt7915/pci.c
+index f6222ef1..1bab1cbb 100644
+--- a/mt7915/pci.c
++++ b/mt7915/pci.c
+@@ -12,9 +12,6 @@
+ #include "mac.h"
+ #include "../trace.h"
+ 
+-static bool wed_enable = false;
+-module_param(wed_enable, bool, 0644);
+-
+ static LIST_HEAD(hif_list);
+ static DEFINE_SPINLOCK(hif_lock);
+ static u32 hif_idx;
+@@ -95,79 +92,12 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
+-{
+-	struct mt7915_dev *dev;
+-	int ret;
+-
+-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+-
+-	spin_lock_bh(&dev->mt76.token_lock);
+-	dev->mt76.token_size = wed->wlan.token_start;
+-	spin_unlock_bh(&dev->mt76.token_lock);
+-
+-	ret = wait_event_timeout(dev->mt76.tx_wait,
+-				 !dev->mt76.wed_token_count, HZ);
+-	if (!ret)
+-		return -EAGAIN;
+-
+-	return 0;
+-}
+-
+-static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
+-{
+-	struct mt7915_dev *dev;
+-
+-	dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+-
+-	spin_lock_bh(&dev->mt76.token_lock);
+-	dev->mt76.token_size = MT7915_TOKEN_SIZE;
+-	spin_unlock_bh(&dev->mt76.token_lock);
+-}
+-#endif
+-
+-static int
+-mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
+-{
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+-	int ret;
+-
+-	if (!wed_enable)
+-		return 0;
+-
+-	wed->wlan.pci_dev = pdev;
+-	wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
+-			       MT_WFDMA_EXT_CSR_BASE;
+-	wed->wlan.nbuf = 4096;
+-	wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+-	wed->wlan.init_buf = mt7915_wed_init_buf;
+-	wed->wlan.offload_enable = mt7915_wed_offload_enable;
+-	wed->wlan.offload_disable = mt7915_wed_offload_disable;
+-
+-	if (mtk_wed_device_attach(wed) != 0)
+-		return 0;
+-
+-	*irq = wed->irq;
+-	dev->mt76.dma_dev = wed->dev;
+-
+-	ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+-	if (ret)
+-		return ret;
+-
+-	return 1;
+-#else
+-	return 0;
+-#endif
+-}
+-
+ static int mt7915_pci_probe(struct pci_dev *pdev,
+ 			    const struct pci_device_id *id)
+ {
+-	struct mt7915_hif *hif2 = NULL;
+ 	struct mt7915_dev *dev;
+ 	struct mt76_dev *mdev;
++	struct mt7915_hif *hif2;
+ 	int irq;
+ 	int ret;
+ 
+@@ -199,24 +129,15 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
+ 	mt7915_wfsys_reset(dev);
+ 	hif2 = mt7915_pci_init_hif2(pdev);
+ 
+-	ret = mt7915_pci_wed_init(dev, pdev, &irq);
++	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ 	if (ret < 0)
+-		goto free_wed_or_irq_vector;
+-
+-	if (!ret) {
+-		hif2 = mt7915_pci_init_hif2(pdev);
+-
+-		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+-		if (ret < 0)
+-			goto free_device;
+-
+-		irq = pdev->irq;
+-	}
++		goto free_device;
+ 
++	irq = pdev->irq;
+ 	ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ 			       IRQF_SHARED, KBUILD_MODNAME, dev);
+ 	if (ret)
+-		goto free_wed_or_irq_vector;
++		goto free_irq_vector;
+ 
+ 	mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ 
+@@ -253,11 +174,8 @@ free_hif2:
+ 	if (dev->hif2)
+ 		put_device(dev->hif2->dev);
+ 	devm_free_irq(mdev->dev, irq, dev);
+-free_wed_or_irq_vector:
+-	if (mtk_wed_device_active(&mdev->mmio.wed))
+-		mtk_wed_device_detach(&mdev->mmio.wed);
+-	else
+-		pci_free_irq_vectors(pdev);
++free_irq_vector:
++	pci_free_irq_vectors(pdev);
+ free_device:
+ 	mt76_free_device(&dev->mt76);
+ 
+diff --git a/mt7915/regs.h b/mt7915/regs.h
+index a3b9bf4c..27f682fe 100644
+--- a/mt7915/regs.h
++++ b/mt7915/regs.h
+@@ -591,31 +591,18 @@ enum offs_rev {
+ 
+ /* WFDMA CSR */
+ #define MT_WFDMA_EXT_CSR_BASE		__REG(WFDMA_EXT_CSR_ADDR)
+-#define MT_WFDMA_EXT_CSR_PHYS_BASE	0x18027000
+ #define MT_WFDMA_EXT_CSR(ofs)		(MT_WFDMA_EXT_CSR_BASE + (ofs))
+-#define MT_WFDMA_EXT_CSR_PHYS(ofs)	(MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
+ 
+-#define MT_WFDMA_HOST_CONFIG		MT_WFDMA_EXT_CSR_PHYS(0x30)
++#define MT_WFDMA_HOST_CONFIG		MT_WFDMA_EXT_CSR(0x30)
+ #define MT_WFDMA_HOST_CONFIG_PDMA_BAND	BIT(0)
+-#define MT_WFDMA_HOST_CONFIG_WED	BIT(1)
+ 
+-#define MT_WFDMA_WED_RING_CONTROL	MT_WFDMA_EXT_CSR_PHYS(0x34)
+-#define MT_WFDMA_WED_RING_CONTROL_TX0	GENMASK(4, 0)
+-#define MT_WFDMA_WED_RING_CONTROL_TX1	GENMASK(12, 8)
+-#define MT_WFDMA_WED_RING_CONTROL_RX1	GENMASK(20, 16)
+-
+-#define MT_WFDMA_EXT_CSR_HIF_MISC	MT_WFDMA_EXT_CSR_PHYS(0x44)
++#define MT_WFDMA_EXT_CSR_HIF_MISC	MT_WFDMA_EXT_CSR(0x44)
+ #define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY	BIT(0)
+ 
+ #define MT_PCIE_RECOG_ID		0xd7090
+ #define MT_PCIE_RECOG_ID_MASK		GENMASK(30, 0)
+ #define MT_PCIE_RECOG_ID_SEM		BIT(31)
+ 
+-#define MT_INT_WED_MASK_CSR		MT_WFDMA_EXT_CSR(0x204)
+-
+-#define MT_WED_TX_RING_BASE		MT_WFDMA_EXT_CSR(0x300)
+-#define MT_WED_RX_RING_BASE		MT_WFDMA_EXT_CSR(0x400)
+-
+ /* WFDMA0 PCIE1 */
+ #define MT_WFDMA0_PCIE1_BASE		__REG(WFDMA0_PCIE1_ADDR)
+ #define MT_WFDMA0_PCIE1(ofs)		(MT_WFDMA0_PCIE1_BASE + (ofs))
+diff --git a/mt7921/dma.c b/mt7921/dma.c
+index 2939cf9b..ca7e20fb 100644
+--- a/mt7921/dma.c
++++ b/mt7921/dma.c
+@@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
+ {
+ 	int i, err;
+ 
+-	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
++	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/tx.c b/tx.c
+index 892f3618..2c26b955 100644
+--- a/tx.c
++++ b/tx.c
+@@ -725,12 +725,6 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+ 	if (token >= 0)
+ 		dev->token_count++;
+ 
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-	if (mtk_wed_device_active(&dev->mmio.wed) &&
+-	    token >= dev->mmio.wed.wlan.token_start)
+-		dev->wed_token_count++;
+-#endif
+-
+ 	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
+ 		__mt76_set_tx_blocked(dev, true);
+ 
+@@ -748,17 +742,9 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+ 	spin_lock_bh(&dev->token_lock);
+ 
+ 	txwi = idr_remove(&dev->token, token);
+-	if (txwi) {
++	if (txwi)
+ 		dev->token_count--;
+ 
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-		if (mtk_wed_device_active(&dev->mmio.wed) &&
+-		    token >= dev->mmio.wed.wlan.token_start &&
+-		    --dev->wed_token_count == 0)
+-			wake_up(&dev->tx_wait);
+-#endif
+-	}
+-
+ 	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
+ 	    dev->phy.q_tx[0]->blocked)
+ 		*wake = true;
+-- 
+2.18.0
+