[][MAC80211][Rebase Patches][Align mt76 patch numbering rules]

[Description]
Fix the patch numbering rules (example filenames follow the list below):

0001-0899: Patches staged for upstream, or being prepared for upstream submission
0900-0999: Build fixes for backport5.15 or backport6.x
1000-1999: Internal development patches (vendor/Test/DebugTool), no short-term plan to upstream
3000-3999: WED patches
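
For example, the WED patch added by this change falls in the 3000 range:
  3005-wifi-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rr.patch
while a backport build fix would use the 0900 range, e.g. (hypothetical name)
  0900-wifi-mt76-fix-build-with-backport.patch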

[Release-log]
N/A

Change-Id: Ie7d4a73262db774e8b49960c9fa2ff62eff72015
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7334531
diff --git a/autobuild_mac80211_release/package/kernel/mt76/patches/3005-wifi-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rr.patch b/autobuild_mac80211_release/package/kernel/mt76/patches/3005-wifi-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rr.patch
new file mode 100644
index 0000000..4d2ecce
--- /dev/null
+++ b/autobuild_mac80211_release/package/kernel/mt76/patches/3005-wifi-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rr.patch
@@ -0,0 +1,506 @@
+From 749eff80bbf6cd121877bb2235095e4908733956 Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Fri, 6 Jan 2023 18:18:50 +0800
+Subject: [PATCH 3005/3012] wifi: mt76: mt7915: wed: add rxwi for further in
+ chip rro
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ dma.c           | 93 +++++++++++++++++++++++++------------------------
+ mac80211.c      |  2 +-
+ mt76.h          | 24 ++++++++-----
+ mt7915/dma.c    |  2 --
+ mt7915/mmio.c   | 29 +++++++--------
+ mt7915/mt7915.h |  1 +
+ tx.c            | 16 ++++-----
+ 7 files changed, 87 insertions(+), 80 deletions(-)
+
+diff --git a/dma.c b/dma.c
+index b2046b43..7792cabf 100644
+--- a/dma.c
++++ b/dma.c
+@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
+ 	return t;
+ }
+ 
+-static struct mt76_txwi_cache *
++static struct mt76_rxwi_cache *
+ mt76_alloc_rxwi(struct mt76_dev *dev)
+ {
+-	struct mt76_txwi_cache *t;
++	struct mt76_rxwi_cache *r;
+ 
+-	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
+-	if (!t)
++	r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
++	if (!r)
+ 		return NULL;
+ 
+-	t->ptr = NULL;
+-	return t;
++	r->ptr = NULL;
++	return r;
+ }
+ 
+ static struct mt76_txwi_cache *
+@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
+ 	return t;
+ }
+ 
+-static struct mt76_txwi_cache *
++static struct mt76_rxwi_cache *
+ __mt76_get_rxwi(struct mt76_dev *dev)
+ {
+-	struct mt76_txwi_cache *t = NULL;
++	struct mt76_rxwi_cache *r = NULL;
+ 
+-	spin_lock(&dev->wed_lock);
++	spin_lock(&dev->lock);
+ 	if (!list_empty(&dev->rxwi_cache)) {
+-		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
++		r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
+ 				     list);
+-		list_del(&t->list);
++		list_del(&r->list);
+ 	}
+-	spin_unlock(&dev->wed_lock);
++	spin_unlock(&dev->lock);
+ 
+-	return t;
++	return r;
+ }
+ 
+ static struct mt76_txwi_cache *
+@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
+ 	return mt76_alloc_txwi(dev);
+ }
+ 
+-struct mt76_txwi_cache *
++struct mt76_rxwi_cache *
+ mt76_get_rxwi(struct mt76_dev *dev)
+ {
+-	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
++	struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
+ 
+-	if (t)
+-		return t;
++	if (r)
++		return r;
+ 
+ 	return mt76_alloc_rxwi(dev);
+ }
+@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ EXPORT_SYMBOL_GPL(mt76_put_txwi);
+ 
+ void
+-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
++mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
+ {
+-	if (!t)
++	if (!r)
+ 		return;
+ 
+-	spin_lock(&dev->wed_lock);
+-	list_add(&t->list, &dev->rxwi_cache);
+-	spin_unlock(&dev->wed_lock);
++	spin_lock(&dev->lock);
++	list_add(&r->list, &dev->rxwi_cache);
++	spin_unlock(&dev->lock);
+ }
+ EXPORT_SYMBOL_GPL(mt76_put_rxwi);
+ 
+@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
+ void
+ mt76_free_pending_rxwi(struct mt76_dev *dev)
+ {
+-	struct mt76_txwi_cache *t;
++	struct mt76_rxwi_cache *r;
+ 
+ 	local_bh_disable();
+-	while ((t = __mt76_get_rxwi(dev)) != NULL) {
+-		if (t->ptr)
+-			skb_free_frag(t->ptr);
+-		kfree(t);
++	while ((r = __mt76_get_rxwi(dev)) != NULL) {
++		if (r->ptr)
++			skb_free_frag(r->ptr);
++		kfree(r);
+ 	}
+ 	local_bh_enable();
+ }
+@@ -212,7 +212,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ {
+ 	struct mt76_desc *desc = &q->desc[q->head];
+ 	struct mt76_queue_entry *entry = &q->entry[q->head];
+-	struct mt76_txwi_cache *txwi = NULL;
++	struct mt76_rxwi_cache *rxwi = NULL;
+ 	u32 buf1 = 0, ctrl;
+ 	int idx = q->head;
+ 	int rx_token;
+@@ -220,13 +220,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+ 
+ 	if (mt76_queue_is_wed_rx(q)) {
+-		txwi = mt76_get_rxwi(dev);
+-		if (!txwi)
++		rxwi = mt76_get_rxwi(dev);
++		if (!rxwi)
+ 			return -ENOMEM;
+ 
+-		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
++		rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
+ 		if (rx_token < 0) {
+-			mt76_put_rxwi(dev, txwi);
++			mt76_put_rxwi(dev, rxwi);
+ 			return -ENOMEM;
+ 		}
+ 
+@@ -241,7 +241,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 	entry->dma_addr[0] = buf->addr;
+ 	entry->dma_len[0] = buf->len;
+-	entry->txwi = txwi;
++	entry->rxwi = rxwi;
+ 	entry->buf = data;
+ 	entry->wcid = 0xffff;
+ 	entry->skip_buf1 = true;
+@@ -254,7 +254,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ static int
+ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
+-		 struct sk_buff *skb, void *txwi)
++		 struct sk_buff *skb, void *txwi, void *rxwi)
+ {
+ 	struct mt76_queue_entry *entry;
+ 	struct mt76_desc *desc;
+@@ -307,6 +307,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ 	}
+ 
+ 	q->entry[idx].txwi = txwi;
++	q->entry[idx].rxwi = rxwi;
+ 	q->entry[idx].skb = skb;
+ 	q->entry[idx].wcid = 0xffff;
+ 
+@@ -405,13 +406,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ 		u32 buf1 = le32_to_cpu(desc->buf1);
+ 		u32 id, find = 0;
+ 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
+-		struct mt76_txwi_cache *t;
++		struct mt76_rxwi_cache *r;
+ 
+ 		if (*more) {
+ 			spin_lock_bh(&dev->rx_token_lock);
+ 
+-			idr_for_each_entry(&dev->rx_token, t, id) {
+-				if (t->dma_addr == le32_to_cpu(desc->buf0)) {
++			idr_for_each_entry(&dev->rx_token, r, id) {
++				if (r->dma_addr == le32_to_cpu(desc->buf0)) {
+ 					find = 1;
+ 					token = id;
+ 
+@@ -428,19 +429,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ 				return NULL;
+ 		}
+ 
+-		t = mt76_rx_token_release(dev, token);
+-		if (!t)
++		r = mt76_rx_token_release(dev, token);
++		if (!r)
+ 			return NULL;
+ 
+-		dma_unmap_single(dev->dma_dev, t->dma_addr,
++		dma_unmap_single(dev->dma_dev, r->dma_addr,
+ 				 SKB_WITH_OVERHEAD(q->buf_size),
+ 				 DMA_FROM_DEVICE);
+ 
+-		buf = t->ptr;
+-		t->dma_addr = 0;
+-		t->ptr = NULL;
++		buf = r->ptr;
++		r->dma_addr = 0;
++		r->ptr = NULL;
+ 
+-		mt76_put_rxwi(dev, t);
++		mt76_put_rxwi(dev, r);
+ 
+ 		if (drop) {
+ 			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
+@@ -501,7 +502,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ 	buf.len = skb->len;
+ 
+ 	spin_lock_bh(&q->lock);
+-	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
++	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
+ 	mt76_dma_kick_queue(dev, q);
+ 	spin_unlock_bh(&q->lock);
+ 
+@@ -578,7 +579,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 		goto unmap;
+ 
+ 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+-				tx_info.info, tx_info.skb, t);
++				tx_info.info, tx_info.skb, t, NULL);
+ 
+ unmap:
+ 	for (n--; n > 0; n--)
+diff --git a/mac80211.c b/mac80211.c
+index 7bfb7a24..4c88710f 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -602,7 +602,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
+ 	spin_lock_init(&dev->lock);
+ 	spin_lock_init(&dev->cc_lock);
+ 	spin_lock_init(&dev->status_lock);
+-	spin_lock_init(&dev->wed_lock);
+ 	mutex_init(&dev->mutex);
+ 	init_waitqueue_head(&dev->tx_wait);
+ 
+@@ -633,6 +632,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
+ 	INIT_LIST_HEAD(&dev->txwi_cache);
+ 	INIT_LIST_HEAD(&dev->rxwi_cache);
+ 	dev->token_size = dev->drv->token_size;
++	dev->rx_token_size = dev->drv->rx_token_size;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ 		skb_queue_head_init(&dev->rx_skb[i]);
+diff --git a/mt76.h b/mt76.h
+index 1af5edfe..d7b0f1bb 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -165,6 +165,7 @@ struct mt76_queue_entry {
+ 	};
+ 	union {
+ 		struct mt76_txwi_cache *txwi;
++		struct mt76_rxwi_cache *rxwi;
+ 		struct urb *urb;
+ 		int buf_sz;
+ 	};
+@@ -356,10 +357,15 @@ struct mt76_txwi_cache {
+ 	struct list_head list;
+ 	dma_addr_t dma_addr;
+ 
+-	union {
+-		struct sk_buff *skb;
+-		void *ptr;
+-	};
++	struct sk_buff *skb;
++};
++
++struct mt76_rxwi_cache {
++	struct list_head list;
++	dma_addr_t dma_addr;
++
++	void *ptr;
++	u32 token;
+ };
+ 
+ struct mt76_rx_tid {
+@@ -445,6 +451,7 @@ struct mt76_driver_ops {
+ 	u16 txwi_size;
+ 	u16 token_size;
+ 	u8 mcs_rates;
++	u16 rx_token_size;
+ 
+ 	void (*update_survey)(struct mt76_phy *phy);
+ 
+@@ -815,7 +822,6 @@ struct mt76_dev {
+ 
+ 	struct ieee80211_hw *hw;
+ 
+-	spinlock_t wed_lock;
+ 	spinlock_t lock;
+ 	spinlock_t cc_lock;
+ 
+@@ -1406,8 +1412,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+ }
+ 
+ void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
++void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
++struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+ void mt76_free_pending_rxwi(struct mt76_dev *dev);
+ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ 		      struct napi_struct *napi);
+@@ -1559,9 +1565,9 @@ struct mt76_txwi_cache *
+ mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
++struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+-			  struct mt76_txwi_cache *r, dma_addr_t phys);
++			  struct mt76_rxwi_cache *r, dma_addr_t phys);
+ 
+ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+ {
+diff --git a/mt7915/dma.c b/mt7915/dma.c
+index d64f492a..08322453 100644
+--- a/mt7915/dma.c
++++ b/mt7915/dma.c
+@@ -493,7 +493,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ 		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ 			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+ 				MT_WED_Q_RX(MT7915_RXQ_BAND0);
+-			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ 		}
+ 
+ 		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+@@ -530,7 +529,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ 		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ 			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+ 				MT_WED_Q_RX(MT7915_RXQ_BAND1);
+-			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ 		}
+ 
+ 		/* rx data queue for band1 */
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+index 0e79faf2..fc9aadb1 100644
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -610,18 +610,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+ 				sizeof(struct skb_shared_info));
+ 
+ 	for (i = 0; i < dev->mt76.rx_token_size; i++) {
+-		struct mt76_txwi_cache *t;
++		struct mt76_rxwi_cache *r;
+ 
+-		t = mt76_rx_token_release(&dev->mt76, i);
+-		if (!t || !t->ptr)
++		r = mt76_rx_token_release(&dev->mt76, i);
++		if (!r || !r->ptr)
+ 			continue;
+ 
+-		dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
++		dma_unmap_single(dev->mt76.dma_dev, r->dma_addr,
+ 				 wed->wlan.rx_size, DMA_FROM_DEVICE);
+-		__free_pages(virt_to_page(t->ptr), get_order(length));
+-		t->ptr = NULL;
++		__free_pages(virt_to_page(r->ptr), get_order(length));
++		r->ptr = NULL;
+ 
+-		mt76_put_rxwi(&dev->mt76, t);
++		mt76_put_rxwi(&dev->mt76, r);
+ 	}
+ 
+ 	mt76_free_pending_rxwi(&dev->mt76);
+@@ -639,18 +639,18 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ 				sizeof(struct skb_shared_info));
+ 
+ 	for (i = 0; i < size; i++) {
+-		struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
++		struct mt76_rxwi_cache *r = mt76_get_rxwi(&dev->mt76);
+ 		dma_addr_t phy_addr;
+ 		struct page *page;
+ 		int token;
+ 		void *ptr;
+ 
+-		if (!t)
++		if (!r)
+ 			goto unmap;
+ 
+ 		page = __dev_alloc_pages(GFP_KERNEL, get_order(length));
+ 		if (!page) {
+-			mt76_put_rxwi(&dev->mt76, t);
++			mt76_put_rxwi(&dev->mt76, r);
+ 			goto unmap;
+ 		}
+ 
+@@ -660,17 +660,17 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ 					  DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
+ 			__free_pages(page, get_order(length));
+-			mt76_put_rxwi(&dev->mt76, t);
++			mt76_put_rxwi(&dev->mt76, r);
+ 			goto unmap;
+ 		}
+ 
+ 		desc->buf0 = cpu_to_le32(phy_addr);
+-		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
++		token = mt76_rx_token_consume(&dev->mt76, ptr, r, phy_addr);
+ 		if (token < 0) {
+ 			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+ 					 wed->wlan.rx_size, DMA_TO_DEVICE);
+ 			__free_pages(page, get_order(length));
+-			mt76_put_rxwi(&dev->mt76, t);
++			mt76_put_rxwi(&dev->mt76, r);
+ 			goto unmap;
+ 		}
+ 
+@@ -831,7 +831,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ 	wed->wlan.reset = mt7915_mmio_wed_reset;
+ 	wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
+ 
+-	dev->mt76.rx_token_size = wed->wlan.rx_npkt;
++	dev->mt76.rx_token_size += wed->wlan.rx_npkt;
+ 
+ 	if (mtk_wed_device_attach(wed))
+ 		return 0;
+@@ -1037,6 +1037,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ 				SURVEY_INFO_TIME_RX |
+ 				SURVEY_INFO_TIME_BSS_RX,
+ 		.token_size = MT7915_TOKEN_SIZE,
++		.rx_token_size = MT7915_RX_TOKEN_SIZE,
+ 		.tx_prepare_skb = mt7915_tx_prepare_skb,
+ 		.tx_complete_skb = mt76_connac_tx_complete_skb,
+ 		.rx_skb = mt7915_queue_rx_skb,
+diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
+index 1dfecdb0..918046f2 100644
+--- a/mt7915/mt7915.h
++++ b/mt7915/mt7915.h
+@@ -57,6 +57,7 @@
+ #define MT7915_EEPROM_BLOCK_SIZE	16
+ #define MT7915_HW_TOKEN_SIZE		7168
+ #define MT7915_TOKEN_SIZE		8192
++#define MT7915_RX_TOKEN_SIZE		4096
+ 
+ #define MT7915_CFEND_RATE_DEFAULT	0x49	/* OFDM 24M */
+ #define MT7915_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
+diff --git a/tx.c b/tx.c
+index 91c8dd8d..04b6f2e7 100644
+--- a/tx.c
++++ b/tx.c
+@@ -751,16 +751,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+ EXPORT_SYMBOL_GPL(mt76_token_consume);
+ 
+ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+-			  struct mt76_txwi_cache *t, dma_addr_t phys)
++			  struct mt76_rxwi_cache *r, dma_addr_t phys)
+ {
+ 	int token;
+ 
+ 	spin_lock_bh(&dev->rx_token_lock);
+-	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
++	token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
+ 			  GFP_ATOMIC);
+ 	if (token >= 0) {
+-		t->ptr = ptr;
+-		t->dma_addr = phys;
++		r->ptr = ptr;
++		r->dma_addr = phys;
+ 	}
+ 	spin_unlock_bh(&dev->rx_token_lock);
+ 
+@@ -797,15 +797,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+ }
+ EXPORT_SYMBOL_GPL(mt76_token_release);
+ 
+-struct mt76_txwi_cache *
++struct mt76_rxwi_cache *
+ mt76_rx_token_release(struct mt76_dev *dev, int token)
+ {
+-	struct mt76_txwi_cache *t;
++	struct mt76_rxwi_cache *r;
+ 
+ 	spin_lock_bh(&dev->rx_token_lock);
+-	t = idr_remove(&dev->rx_token, token);
++	r = idr_remove(&dev->rx_token, token);
+ 	spin_unlock_bh(&dev->rx_token_lock);
+ 
+-	return t;
++	return r;
+ }
+ EXPORT_SYMBOL_GPL(mt76_rx_token_release);
+-- 
+2.39.0
+