[rdkb][common][bsp][Refactor and sync wifi from OpenWrt]
[Description]
feda97c [MAC80211][mt76][Enable sigma daemon in mt7988]
90bafdf [MAC80211][mt76][rework patches for mt7996]
1b21440 [MAC80211][mt76][Add mac80211 5.15 source]
c75c5a6 [MAC80211][mt76][Refactor mt76 internal patches]
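The new 3008 patch added below splits the rx descriptor bookkeeping out of
struct mt76_txwi_cache into a dedicated struct mt76_rxwi_cache, guards the
rxwi free list with the existing dev->lock instead of the removed wed_lock,
threads an rxwi argument through mt76_dma_add_buf(), and sizes the rx token
idr from a new per-driver rx_token_size (MT7915_RX_TOKEN_SIZE = 4096) rather
than accumulating MT7915_RX_RING_SIZE per WED rx queue. For reference, the
resulting split in mt76.h, copied from the patch:

    struct mt76_txwi_cache {
        struct list_head list;
        dma_addr_t dma_addr;

        struct sk_buff *skb;    /* tx-only: skb pending completion */
    };

    struct mt76_rxwi_cache {
        struct list_head list;
        dma_addr_t dma_addr;

        void *ptr;              /* rx-only: page-pool buffer */
        u32 token;              /* rx token id, for in-chip RRO */
    };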
[Release-log]
Change-Id: I5b2706e2673bc43437f51713260237f67e8fbea9
diff --git a/recipes-wifi/linux-mt76/files/patches/3008-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro.patch b/recipes-wifi/linux-mt76/files/patches/3008-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro.patch
new file mode 100644
index 0000000..0e6a4bf
--- /dev/null
+++ b/recipes-wifi/linux-mt76/files/patches/3008-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro.patch
@@ -0,0 +1,499 @@
+From 664c9ad764c5ec77923ec6a70623e7b294a4bf60 Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Fri, 6 Jan 2023 18:18:50 +0800
+Subject: [PATCH 3008/3010] mt76: mt7915: wed: add rxwi for further in chip rro
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ dma.c | 93 +++++++++++++++++++++++++------------------------
+ mac80211.c | 2 +-
+ mt76.h | 24 ++++++++-----
+ mt7915/dma.c | 2 --
+ mt7915/mmio.c | 27 +++++++-------
+ mt7915/mt7915.h | 1 +
+ tx.c | 16 ++++-----
+ 7 files changed, 86 insertions(+), 79 deletions(-)
+
+diff --git a/dma.c b/dma.c
+index 86b0bf84..76af6506 100644
+--- a/dma.c
++++ b/dma.c
+@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
+ return t;
+ }
+
+-static struct mt76_txwi_cache *
++static struct mt76_rxwi_cache *
+ mt76_alloc_rxwi(struct mt76_dev *dev)
+ {
+- struct mt76_txwi_cache *t;
++ struct mt76_rxwi_cache *r;
+
+- t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
+- if (!t)
++ r = kzalloc(L1_CACHE_ALIGN(sizeof(*r)), GFP_ATOMIC);
++ if (!r)
+ return NULL;
+
+- t->ptr = NULL;
+- return t;
++ r->ptr = NULL;
++ return r;
+ }
+
+ static struct mt76_txwi_cache *
+@@ -88,20 +88,20 @@ __mt76_get_txwi(struct mt76_dev *dev)
+ return t;
+ }
+
+-static struct mt76_txwi_cache *
++static struct mt76_rxwi_cache *
+ __mt76_get_rxwi(struct mt76_dev *dev)
+ {
+- struct mt76_txwi_cache *t = NULL;
++ struct mt76_rxwi_cache *r = NULL;
+
+- spin_lock(&dev->wed_lock);
++ spin_lock(&dev->lock);
+ if (!list_empty(&dev->rxwi_cache)) {
+- t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
++ r = list_first_entry(&dev->rxwi_cache, struct mt76_rxwi_cache,
+ list);
+- list_del(&t->list);
++ list_del(&r->list);
+ }
+- spin_unlock(&dev->wed_lock);
++ spin_unlock(&dev->lock);
+
+- return t;
++ return r;
+ }
+
+ static struct mt76_txwi_cache *
+@@ -115,13 +115,13 @@ mt76_get_txwi(struct mt76_dev *dev)
+ return mt76_alloc_txwi(dev);
+ }
+
+-struct mt76_txwi_cache *
++struct mt76_rxwi_cache *
+ mt76_get_rxwi(struct mt76_dev *dev)
+ {
+- struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
++ struct mt76_rxwi_cache *r = __mt76_get_rxwi(dev);
+
+- if (t)
+- return t;
++ if (r)
++ return r;
+
+ return mt76_alloc_rxwi(dev);
+ }
+@@ -140,14 +140,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+ void
+-mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
++mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r)
+ {
+- if (!t)
++ if (!r)
+ return;
+
+- spin_lock(&dev->wed_lock);
+- list_add(&t->list, &dev->rxwi_cache);
+- spin_unlock(&dev->wed_lock);
++ spin_lock(&dev->lock);
++ list_add(&r->list, &dev->rxwi_cache);
++ spin_unlock(&dev->lock);
+ }
+ EXPORT_SYMBOL_GPL(mt76_put_rxwi);
+
+@@ -168,13 +168,13 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
+ void
+ mt76_free_pending_rxwi(struct mt76_dev *dev)
+ {
+- struct mt76_txwi_cache *t;
++ struct mt76_rxwi_cache *r;
+
+ local_bh_disable();
+- while ((t = __mt76_get_rxwi(dev)) != NULL) {
+- if (t->ptr)
+- mt76_put_page_pool_buf(t->ptr, false);
+- kfree(t);
++ while ((r = __mt76_get_rxwi(dev)) != NULL) {
++ if (r->ptr)
++ mt76_put_page_pool_buf(r->ptr, false);
++ kfree(r);
+ }
+ local_bh_enable();
+ }
+@@ -212,7 +212,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ {
+ struct mt76_desc *desc = &q->desc[q->head];
+ struct mt76_queue_entry *entry = &q->entry[q->head];
+- struct mt76_txwi_cache *txwi = NULL;
++ struct mt76_rxwi_cache *rxwi = NULL;
+ u32 buf1 = 0, ctrl;
+ int idx = q->head;
+ int rx_token;
+@@ -220,13 +220,13 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+
+ if (mt76_queue_is_wed_rx(q)) {
+- txwi = mt76_get_rxwi(dev);
+- if (!txwi)
++ rxwi = mt76_get_rxwi(dev);
++ if (!rxwi)
+ return -ENOMEM;
+
+- rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
++ rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
+ if (rx_token < 0) {
+- mt76_put_rxwi(dev, txwi);
++ mt76_put_rxwi(dev, rxwi);
+ return -ENOMEM;
+ }
+
+@@ -241,7 +241,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+
+ entry->dma_addr[0] = buf->addr;
+ entry->dma_len[0] = buf->len;
+- entry->txwi = txwi;
++ entry->rxwi = rxwi;
+ entry->buf = data;
+ entry->wcid = 0xffff;
+ entry->skip_buf1 = true;
+@@ -254,7 +254,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ static int
+ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+- struct sk_buff *skb, void *txwi)
++ struct sk_buff *skb, void *txwi, void *rxwi)
+ {
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+@@ -307,6 +307,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ }
+
+ q->entry[idx].txwi = txwi;
++ q->entry[idx].rxwi = rxwi;
+ q->entry[idx].skb = skb;
+ q->entry[idx].wcid = 0xffff;
+
+@@ -406,13 +407,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ u32 id, find = 0;
+ u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
+ le32_to_cpu(desc->buf1));
+- struct mt76_txwi_cache *t;
++ struct mt76_rxwi_cache *r;
+
+ if (*more) {
+ spin_lock_bh(&dev->rx_token_lock);
+
+- idr_for_each_entry(&dev->rx_token, t, id) {
+- if (t->dma_addr == le32_to_cpu(desc->buf0)) {
++ idr_for_each_entry(&dev->rx_token, r, id) {
++ if (r->dma_addr == le32_to_cpu(desc->buf0)) {
+ find = 1;
+ token = id;
+
+@@ -430,19 +431,19 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ return NULL;
+ }
+
+- t = mt76_rx_token_release(dev, token);
+- if (!t)
++ r = mt76_rx_token_release(dev, token);
++ if (!r)
+ return NULL;
+
+- dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
++ dma_sync_single_for_cpu(dev->dma_dev, r->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+- buf = t->ptr;
+- t->dma_addr = 0;
+- t->ptr = NULL;
++ buf = r->ptr;
++ r->dma_addr = 0;
++ r->ptr = NULL;
+
+- mt76_put_rxwi(dev, t);
++ mt76_put_rxwi(dev, r);
+
+ if (drop) {
+ u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
+@@ -510,7 +511,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ buf.len = skb->len;
+
+ spin_lock_bh(&q->lock);
+- mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
++ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
+ mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
+
+@@ -587,7 +588,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ goto unmap;
+
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+- tx_info.info, tx_info.skb, t);
++ tx_info.info, tx_info.skb, t, NULL);
+
+ unmap:
+ for (n--; n > 0; n--)
+diff --git a/mac80211.c b/mac80211.c
+index 35fd0347..95861d47 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -640,7 +640,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->cc_lock);
+ spin_lock_init(&dev->status_lock);
+- spin_lock_init(&dev->wed_lock);
+ mutex_init(&dev->mutex);
+ init_waitqueue_head(&dev->tx_wait);
+
+@@ -671,6 +670,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
+ INIT_LIST_HEAD(&dev->txwi_cache);
+ INIT_LIST_HEAD(&dev->rxwi_cache);
+ dev->token_size = dev->drv->token_size;
++ dev->rx_token_size = dev->drv->rx_token_size;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ skb_queue_head_init(&dev->rx_skb[i]);
+diff --git a/mt76.h b/mt76.h
+index 982d0bbf..a36a978b 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -166,6 +166,7 @@ struct mt76_queue_entry {
+ };
+ union {
+ struct mt76_txwi_cache *txwi;
++ struct mt76_rxwi_cache *rxwi;
+ struct urb *urb;
+ int buf_sz;
+ };
+@@ -357,10 +358,15 @@ struct mt76_txwi_cache {
+ struct list_head list;
+ dma_addr_t dma_addr;
+
+- union {
+- struct sk_buff *skb;
+- void *ptr;
+- };
++ struct sk_buff *skb;
++};
++
++struct mt76_rxwi_cache {
++ struct list_head list;
++ dma_addr_t dma_addr;
++
++ void *ptr;
++ u32 token;
+ };
+
+ struct mt76_rx_tid {
+@@ -445,6 +451,7 @@ struct mt76_driver_ops {
+ u16 txwi_size;
+ u16 token_size;
+ u8 mcs_rates;
++ u16 rx_token_size;
+
+ void (*update_survey)(struct mt76_phy *phy);
+
+@@ -815,7 +822,6 @@ struct mt76_dev {
+
+ struct ieee80211_hw *hw;
+
+- spinlock_t wed_lock;
+ spinlock_t lock;
+ spinlock_t cc_lock;
+
+@@ -1406,8 +1412,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+ }
+
+ void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+-void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+-struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
++void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_rxwi_cache *r);
++struct mt76_rxwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+ void mt76_free_pending_rxwi(struct mt76_dev *dev);
+ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ struct napi_struct *napi);
+@@ -1560,9 +1566,9 @@ struct mt76_txwi_cache *
+ mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+-struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
++struct mt76_rxwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+- struct mt76_txwi_cache *r, dma_addr_t phys);
++ struct mt76_rxwi_cache *r, dma_addr_t phys);
+ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
+ static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
+ {
+diff --git a/mt7915/dma.c b/mt7915/dma.c
+index 52fabde0..4dd321ee 100644
+--- a/mt7915/dma.c
++++ b/mt7915/dma.c
+@@ -491,7 +491,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND0);
+- dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+@@ -528,7 +527,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND1);
+- dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
+ /* rx data queue for band1 */
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+index 890af388..6dec9d60 100644
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -606,16 +606,16 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+- struct mt76_txwi_cache *t;
++ struct mt76_rxwi_cache *r;
+
+- t = mt76_rx_token_release(&dev->mt76, i);
+- if (!t || !t->ptr)
++ r = mt76_rx_token_release(&dev->mt76, i);
++ if (!r || !r->ptr)
+ continue;
+
+- mt76_put_page_pool_buf(t->ptr, false);
+- t->ptr = NULL;
++ mt76_put_page_pool_buf(r->ptr, false);
++ r->ptr = NULL;
+
+- mt76_put_rxwi(&dev->mt76, t);
++ mt76_put_rxwi(&dev->mt76, r);
+ }
+
+ mt76_free_pending_rxwi(&dev->mt76);
+@@ -624,7 +624,7 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ {
+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+- struct mt76_txwi_cache *t = NULL;
++ struct mt76_rxwi_cache *r = NULL;
+ struct mt7915_dev *dev;
+ struct mt76_queue *q;
+ int i, len;
+@@ -640,8 +640,8 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ int token;
+ void *buf;
+
+- t = mt76_get_rxwi(&dev->mt76);
+- if (!t)
++ r = mt76_get_rxwi(&dev->mt76);
++ if (!r)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+@@ -653,7 +653,7 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+- token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
++ token = mt76_rx_token_consume(&dev->mt76, buf, r, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+@@ -667,8 +667,8 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ return 0;
+
+ unmap:
+- if (t)
+- mt76_put_rxwi(&dev->mt76, t);
++ if (r)
++ mt76_put_rxwi(&dev->mt76, r);
+ mt7915_mmio_wed_release_rx_buf(wed);
+ return -ENOMEM;
+ }
+@@ -818,7 +818,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ wed->wlan.reset = mt7915_mmio_wed_reset;
+ wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
+
+- dev->mt76.rx_token_size = wed->wlan.rx_npkt;
++ dev->mt76.rx_token_size += wed->wlan.rx_npkt;
+
+ if (mtk_wed_device_attach(wed))
+ return 0;
+@@ -1024,6 +1024,7 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
++ .rx_token_size = MT7915_RX_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
+index a1ef8359..594dfcd5 100644
+--- a/mt7915/mt7915.h
++++ b/mt7915/mt7915.h
+@@ -57,6 +57,7 @@
+ #define MT7915_EEPROM_BLOCK_SIZE 16
+ #define MT7915_HW_TOKEN_SIZE 4096
+ #define MT7915_TOKEN_SIZE 8192
++#define MT7915_RX_TOKEN_SIZE 4096
+
+ #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+ #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+diff --git a/tx.c b/tx.c
+index 6d55566f..a72b7779 100644
+--- a/tx.c
++++ b/tx.c
+@@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
+ EXPORT_SYMBOL_GPL(mt76_token_consume);
+
+ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+- struct mt76_txwi_cache *t, dma_addr_t phys)
++ struct mt76_rxwi_cache *r, dma_addr_t phys)
+ {
+ int token;
+
+ spin_lock_bh(&dev->rx_token_lock);
+- token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
++ token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size,
+ GFP_ATOMIC);
+ if (token >= 0) {
+- t->ptr = ptr;
+- t->dma_addr = phys;
++ r->ptr = ptr;
++ r->dma_addr = phys;
+ }
+ spin_unlock_bh(&dev->rx_token_lock);
+
+@@ -802,15 +802,15 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+ }
+ EXPORT_SYMBOL_GPL(mt76_token_release);
+
+-struct mt76_txwi_cache *
++struct mt76_rxwi_cache *
+ mt76_rx_token_release(struct mt76_dev *dev, int token)
+ {
+- struct mt76_txwi_cache *t;
++ struct mt76_rxwi_cache *r;
+
+ spin_lock_bh(&dev->rx_token_lock);
+- t = idr_remove(&dev->rx_token, token);
++ r = idr_remove(&dev->rx_token, token);
+ spin_unlock_bh(&dev->rx_token_lock);
+
+- return t;
++ return r;
+ }
+ EXPORT_SYMBOL_GPL(mt76_rx_token_release);
+--
+2.18.0
+
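Reviewer note (not part of the applied patch): a minimal sketch of the rxwi
token flow after this change, assembled only from the helpers shown in the
hunks above (dma.c, tx.c, mt7915/mmio.c); no names beyond those in the patch
are assumed.

    /* fill path -- see mt76_dma_add_rx_buf() / mt7915_mmio_wed_init_rx_buf() */
    struct mt76_rxwi_cache *r = mt76_get_rxwi(dev);        /* pop dev->rxwi_cache under dev->lock, kzalloc on miss */
    int token = mt76_rx_token_consume(dev, buf, r, phys);  /* idr_alloc in [0, dev->rx_token_size) */
    if (token < 0)
        mt76_put_rxwi(dev, r);                             /* return entry to dev->rxwi_cache */

    /* completion path -- see mt76_dma_get_buf() */
    r = mt76_rx_token_release(dev, token);                 /* idr_remove under dev->rx_token_lock */
    buf = r->ptr;                                          /* reclaim the page-pool buffer */
    r->dma_addr = 0;
    r->ptr = NULL;
    mt76_put_rxwi(dev, r);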