[][MAC80211][wed][fix rx copy patch to sync with latest mt76 version]
[Description]
Fix the rx copy patch to sync with the latest mt76 version
[Release-log]
N/A
Change-Id: I94f96375c821e9ae98bc7f684a2a5ae839957572
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7017857
diff --git a/autobuild_mac80211_release/package/kernel/mt76/patches/3011-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro-dev.patch b/autobuild_mac80211_release/package/kernel/mt76/patches/3011-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro-dev.patch
index 7de420c..bac87e6 100644
--- a/autobuild_mac80211_release/package/kernel/mt76/patches/3011-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro-dev.patch
+++ b/autobuild_mac80211_release/package/kernel/mt76/patches/3011-mt76-mt7915-wed-add-rxwi-for-further-in-chip-rro-dev.patch
@@ -1,21 +1,21 @@
-From afd516f8fd841fc7fc46667edc57f2f10d92de46 Mon Sep 17 00:00:00 2001
-From: Evelyn Tsai <evelyn.tsai@mediatek.com>
-Date: Wed, 21 Dec 2022 09:47:01 +0800
-Subject: [PATCH 3011/3013] mt76: mt7915: wed: add rxwi for further in chip rro
- development
+From dc6f151a8420ce5569b1db103525543600b2f9b5 Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Fri, 6 Jan 2023 18:18:50 +0800
+Subject: [PATCH 3011/3014] mt76: mt7915: wed: add rxwi for further in chip rro
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
- dma.c | 98 +++++++++++++++++++++++++------------------------
- mac80211.c | 2 +-
- mt76.h | 24 +++++++-----
- mt7915/dma.c | 2 -
- mt7915/mmio.c | 21 ++++++-----
- mt7915/mt7915.h | 1 +
- tx.c | 16 ++++----
- 7 files changed, 86 insertions(+), 78 deletions(-)
+ dma.c | 107 ++++++++++++++++++++++++------------------------
+ mac80211.c | 2 +-
+ mt76.h | 24 +++++++----
+ mt7915/dma.c | 2 -
+ mt7915/mmio.c | 21 +++++-----
+ mt7915/mt7915.h | 1 +
+ tx.c | 16 ++++----
+ 7 files changed, 90 insertions(+), 83 deletions(-)
diff --git a/dma.c b/dma.c
-index 0914266..7ef272e 100644
+index 98d2a29a..21f26df7 100644
--- a/dma.c
+++ b/dma.c
@@ -59,17 +59,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
@@ -129,6 +129,45 @@
}
@@ -209,7 +209,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
static int
+ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, void *data,
+- struct mt76_txwi_cache *txwi)
++ struct mt76_rxwi_cache *rxwi)
+ {
+ struct mt76_desc *desc = &q->desc[q->head];
+ struct mt76_queue_entry *entry = &q->entry[q->head];
+@@ -221,15 +221,15 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+
+ if ((q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+- if(!txwi) {
+- txwi = mt76_get_rxwi(dev);
+- if (!txwi)
++ if(!rxwi) {
++ rxwi = mt76_get_rxwi(dev);
++ if (!rxwi)
+ return -ENOMEM;
+ }
+
+- rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
++ rx_token = mt76_rx_token_consume(dev, data, rxwi, buf->addr);
+ if (rx_token < 0) {
+- mt76_put_rxwi(dev, txwi);
++ mt76_put_rxwi(dev, rxwi);
+ return -ENOMEM;
+ }
+
+@@ -244,7 +244,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+
+ entry->dma_addr[0] = buf->addr;
+ entry->dma_len[0] = buf->len;
+- entry->txwi = txwi;
++ entry->rxwi = rxwi;
+ entry->buf = data;
+ entry->wcid = 0xffff;
+ entry->skip_buf1 = true;
+@@ -257,7 +257,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
- struct sk_buff *skb, void *txwi)
@@ -136,24 +175,7 @@
{
struct mt76_queue_entry *entry;
struct mt76_desc *desc;
-@@ -227,13 +227,13 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
-
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-- struct mt76_txwi_cache *t = txwi;
-+ struct mt76_rxwi_cache *r = rxwi;
- int rx_token;
-
-- if (!t)
-+ if (!r)
- return -ENOMEM;
-
-- rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
-+ rx_token = mt76_rx_token_consume(dev, (void *)skb, r,
- buf[0].addr);
- if (rx_token < 0)
- return -ENOMEM;
-@@ -280,6 +280,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+@@ -310,6 +310,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
}
q->entry[idx].txwi = txwi;
@@ -161,12 +183,13 @@
q->entry[idx].skb = skb;
q->entry[idx].wcid = 0xffff;
-@@ -379,13 +380,13 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+@@ -409,14 +410,14 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
u32 id, find = 0;
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
le32_to_cpu(desc->buf1));
- struct mt76_txwi_cache *t;
+ struct mt76_rxwi_cache *r;
+ struct mt76_queue_buf qbuf;
if (*more) {
spin_lock_bh(&dev->rx_token_lock);
@@ -178,7 +201,7 @@
find = 1;
desc->buf1 = FIELD_PREP(MT_DMA_CTL_TOKEN, id);
token = id;
-@@ -398,11 +399,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+@@ -429,11 +430,11 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
return NULL;
}
@@ -193,21 +216,45 @@
SKB_WITH_OVERHEAD(q->buf_size),
DMA_FROM_DEVICE);
-@@ -410,10 +411,10 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+@@ -441,27 +442,27 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (!buf)
return NULL;
- memcpy(buf, t->ptr, SKB_WITH_OVERHEAD(q->buf_size));
-- t->dma_addr = 0;
+ memcpy(buf, r->ptr, SKB_WITH_OVERHEAD(q->buf_size));
-+ r->dma_addr = 0;
+
+- t->dma_addr = dma_map_single(dev->dma_dev, t->ptr,
++ r->dma_addr = dma_map_single(dev->dma_dev, r->ptr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(dev->dma_dev, t->dma_addr))) {
+- skb_free_frag(t->ptr);
+- mt76_put_rxwi(dev, t);
++ if (unlikely(dma_mapping_error(dev->dma_dev, r->dma_addr))) {
++ skb_free_frag(r->ptr);
++ mt76_put_rxwi(dev, r);
+ return NULL;
+ }
-- mt76_put_rxwi(dev, t);
-+ mt76_put_rxwi(dev, r);
+- qbuf.addr = t->dma_addr;
++ qbuf.addr = r->dma_addr;
+ qbuf.len = SKB_WITH_OVERHEAD(q->buf_size);
+ qbuf.skip_unmap = false;
+
+- if (mt76_dma_add_rx_buf(dev, q, &qbuf, t->ptr, t) < 0) {
+- dma_unmap_single(dev->dma_dev, t->dma_addr,
++ if (mt76_dma_add_rx_buf(dev, q, &qbuf, r->ptr, r) < 0) {
++ dma_unmap_single(dev->dma_dev, r->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ DMA_FROM_DEVICE);
+- skb_free_frag(t->ptr);
+- mt76_put_rxwi(dev, t);
++ skb_free_frag(r->ptr);
++ mt76_put_rxwi(dev, r);
+ return NULL;
+ }
- if (drop) {
- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-@@ -481,7 +482,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+@@ -531,7 +532,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
buf.len = skb->len;
spin_lock_bh(&q->lock);
@@ -216,7 +263,7 @@
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
-@@ -558,7 +559,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+@@ -608,7 +609,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
goto unmap;
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
@@ -225,44 +272,8 @@
unmap:
for (n--; n > 0; n--)
-@@ -598,20 +599,21 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
- spin_lock_bh(&q->lock);
-
- while (q->queued < q->ndesc - 1) {
-- struct mt76_txwi_cache *t = NULL;
-+ struct mt76_rxwi_cache *r = NULL;
- struct mt76_queue_buf qbuf;
- bool skip_alloc = false;
- void *buf = NULL;
-
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-- t = mt76_get_rxwi(dev);
-- if (!t)
-+ r = mt76_get_rxwi(dev);
-+ if (!r)
- break;
-
-- if (t->ptr) {
-+ /* reuse skb buf for wed rx copy*/
-+ if (r->ptr) {
- skip_alloc = true;
-- buf = t->ptr;
-+ buf = r->ptr;
- }
- }
-
-@@ -630,7 +632,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
- qbuf.addr = addr + offset;
- qbuf.len = len - offset;
- qbuf.skip_unmap = false;
-- if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t) < 0) {
-+ if (mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r) < 0) {
- dma_unmap_single(dev->dma_dev, addr, len,
- DMA_FROM_DEVICE);
- skb_free_frag(buf);
diff --git a/mac80211.c b/mac80211.c
-index de9ef23..818f4f0 100644
+index de9ef237..818f4f0c 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -597,7 +597,6 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
@@ -282,7 +293,7 @@
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
diff --git a/mt76.h b/mt76.h
-index b10a16f..631c4cc 100644
+index b10a16f8..631c4ccd 100644
--- a/mt76.h
+++ b/mt76.h
@@ -166,6 +166,7 @@ struct mt76_queue_entry {
@@ -353,7 +364,7 @@
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
diff --git a/mt7915/dma.c b/mt7915/dma.c
-index 3626008..9cbd362 100644
+index 36260085..9cbd3625 100644
--- a/mt7915/dma.c
+++ b/mt7915/dma.c
@@ -492,7 +492,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
@@ -373,7 +384,7 @@
/* rx data queue for band1 */
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
-index 4bc8e8c..09b3973 100644
+index 4bc8e8cd..09b39730 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -605,18 +605,18 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
@@ -438,7 +449,7 @@
.tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
-index c3a0b32..91b98ed 100644
+index c3a0b326..91b98ede 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -57,6 +57,7 @@
@@ -450,7 +461,7 @@
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
diff --git a/tx.c b/tx.c
-index 6d55566..a72b777 100644
+index 6d55566f..a72b7779 100644
--- a/tx.c
+++ b/tx.c
@@ -756,16 +756,16 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)