[Add kernel flowblock support and sync Wifi]
[Description]
Add kernel flowblock support and sync the mt76 WiFi driver patches: refresh the testmode (1111) and TWT token-drop (1113) patches against the updated mt76 source, add a new vendor command for dumping PHY capabilities (1116), and replace the temporary "remove WED support" build workaround (3000) with WED TX support (3001).
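
For context on the WED portion of this sync: dropping 3000-mt76-remove-WED-support-patch-for-build-err.patch and adding 3001-mt76-add-wed-tx-support.patch means mt76 again binds TX rings to WED (Wireless Ethernet Dispatch) for hardware offload. mt76 records that binding by packing a WED type and ring index into the queue's flags field. The fragment below is an illustrative sketch only; it mirrors the MT_QFLAG_* helpers visible in the removed 3000-* patch further down, and the enum values are assumptions based on mt76.h, not part of this change:

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Assumed to match the enum in mt76.h; shown only to illustrate the encoding. */
enum {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
};

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
#define MT_QFLAG_WED		BIT(4)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)

/* Decode as mt76_dma_wed_setup() does before handing the ring to WED. */
static inline bool wed_queue_ring(u8 flags, int *type, int *ring)
{
	if (!(flags & MT_QFLAG_WED))
		return false;				/* queue not WED-managed */

	*type = FIELD_GET(MT_QFLAG_WED_TYPE, flags);	/* TX or TXFREE */
	*ring = FIELD_GET(MT_QFLAG_WED_RING, flags);	/* hardware ring index */
	return true;
}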
[Release-log]
N/A
diff --git a/recipes-kernel/linux-mt76/files/patches/1111-mt76-testmode-additional-supports.patch b/recipes-kernel/linux-mt76/files/patches/1111-mt76-testmode-additional-supports.patch
index 292e2fc..d39e83d 100644
--- a/recipes-kernel/linux-mt76/files/patches/1111-mt76-testmode-additional-supports.patch
+++ b/recipes-kernel/linux-mt76/files/patches/1111-mt76-testmode-additional-supports.patch
@@ -1,7 +1,7 @@
-From de93c29a69f28d2cf9d2e10fbd3cb1ebda771548 Mon Sep 17 00:00:00 2001
+From 9c76671cd831d28e5b270baa23c71dfc4aaf8410 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
-Subject: [PATCH 1111/1115] mt76: testmode: additional supports
+Subject: [PATCH] mt76: testmode: additional supports
Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
@@ -20,14 +20,14 @@
mt7915/regs.h | 3 +
mt7915/testmode.c | 1171 ++++++++++++++++++++++++++++++++++++++++++---
mt7915/testmode.h | 278 +++++++++++
- testmode.c | 274 +++++++++--
+ testmode.c | 275 +++++++++--
testmode.h | 75 +++
tools/fields.c | 80 ++++
tx.c | 3 +-
- 19 files changed, 1962 insertions(+), 149 deletions(-)
+ 19 files changed, 1963 insertions(+), 149 deletions(-)
diff --git a/dma.c b/dma.c
-index 30de8be..f6f5f12 100644
+index f22273cd..03ee9109 100644
--- a/dma.c
+++ b/dma.c
@@ -426,8 +426,7 @@ free:
@@ -41,7 +41,7 @@
#endif
diff --git a/mac80211.c b/mac80211.c
-index c1df063..dc3c63e 100644
+index a7e082f7..9984def5 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
@@ -71,7 +71,7 @@
static const struct ieee80211_channel mt76_channels_6ghz[] = {
diff --git a/mt76.h b/mt76.h
-index 28720ee..062c5ce 100644
+index 8325409a..4c8a671f 100644
--- a/mt76.h
+++ b/mt76.h
@@ -602,6 +602,21 @@ struct mt76_testmode_ops {
@@ -235,7 +235,7 @@
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
-index 261181d..cd35068 100644
+index 261181dc..cd350689 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -391,6 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
@@ -257,7 +257,7 @@
return;
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
-index 32e540c..a0e6fa6 100644
+index 32e540cc..a0e6fa6e 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -967,6 +967,7 @@ enum {
@@ -277,7 +277,7 @@
MCU_EXT_CMD_CSI_CTRL = 0xc2,
};
diff --git a/mt7915/init.c b/mt7915/init.c
-index 0d5109a..b549fa0 100644
+index 0d5109a3..b549fa04 100644
--- a/mt7915/init.c
+++ b/mt7915/init.c
@@ -576,7 +576,7 @@ static void mt7915_init_work(struct work_struct *work)
@@ -290,7 +290,7 @@
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
diff --git a/mt7915/mac.c b/mt7915/mac.c
-index 0593270..8fd4618 100644
+index f13456bf..5e5df23d 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -565,17 +565,39 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
@@ -356,7 +356,7 @@
}
diff --git a/mt7915/main.c b/mt7915/main.c
-index dc5e4b0..e4b2076 100644
+index dc5e4b0f..e4b20766 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -224,7 +224,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
@@ -369,7 +369,7 @@
mvif->mt76.wmm_idx += 2;
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
-index aee3a8c..810d72e 100644
+index fa8f51da..41a8e7e2 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -434,6 +434,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
@@ -392,7 +392,7 @@
!rxd->seq)
mt7915_mcu_rx_unsolicited_event(dev, skb);
else
-@@ -2812,14 +2818,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
+@@ -2809,14 +2815,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
return 0;
}
@@ -410,7 +410,7 @@
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
diff --git a/mt7915/mcu.h b/mt7915/mcu.h
-index 75442ba..873a805 100644
+index 15b74732..1b147421 100644
--- a/mt7915/mcu.h
+++ b/mt7915/mcu.h
@@ -8,10 +8,15 @@
@@ -442,7 +442,7 @@
MT_BF_TYPE_UPDATE = 20,
MT_BF_MODULE_UPDATE = 25
};
-@@ -661,10 +672,19 @@ struct mt7915_muru {
+@@ -664,10 +675,19 @@ struct mt7915_muru {
#define MURU_OFDMA_SCH_TYPE_UL BIT(1)
/* Common Config */
@@ -467,7 +467,7 @@
enum {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
-index bccb108..a84970d 100644
+index 6e140e2d..6d1dbdbd 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -76,6 +76,7 @@ static const u32 mt7915_offs[] = {
@@ -487,7 +487,7 @@
[AGG_PCR0] = 0x040,
[AGG_ACR0] = 0x054,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
-index 9f5da64..ff92e55 100644
+index 07a1c9ce..7c7d6bd3 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -303,6 +303,9 @@ struct mt7915_phy {
@@ -533,7 +533,7 @@
static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
{
diff --git a/mt7915/regs.h b/mt7915/regs.h
-index 47bae86..444440e 100644
+index 47bae86e..444440e1 100644
--- a/mt7915/regs.h
+++ b/mt7915/regs.h
@@ -51,6 +51,7 @@ enum offs_rev {
@@ -554,7 +554,7 @@
(_n) * 4))
#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
diff --git a/mt7915/testmode.c b/mt7915/testmode.c
-index 123ceaf..a0a9f7b 100644
+index 123ceaf9..a0a9f7be 100644
--- a/mt7915/testmode.c
+++ b/mt7915/testmode.c
@@ -9,6 +9,9 @@
@@ -1887,7 +1887,7 @@
+ .set_eeprom = mt7915_tm_set_eeprom,
};
diff --git a/mt7915/testmode.h b/mt7915/testmode.h
-index a1c54c8..01b08e9 100644
+index a1c54c89..01b08e9e 100644
--- a/mt7915/testmode.h
+++ b/mt7915/testmode.h
@@ -4,6 +4,8 @@
@@ -2202,17 +2202,18 @@
+
#endif
diff --git a/testmode.c b/testmode.c
-index e6d1f70..2c699ac 100644
+index 4a24f6c9..690e9a7d 100644
--- a/testmode.c
+++ b/testmode.c
-@@ -25,28 +25,15 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+@@ -25,28 +25,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
};
EXPORT_SYMBOL_GPL(mt76_tm_policy);
-void mt76_testmode_tx_pending(struct mt76_phy *phy)
+static void
+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
-+ struct sk_buff *skb, struct mt76_queue *q, u16 limit)
++ struct sk_buff *skb, struct mt76_queue *q, int qid,
++ u16 limit)
{
struct mt76_testmode_data *td = &phy->test;
struct mt76_dev *dev = phy->dev;
@@ -2239,7 +2240,7 @@
q->queued < q->ndesc / 2) {
int ret;
-@@ -55,13 +42,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
+@@ -55,13 +43,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
if (ret < 0)
break;
@@ -2276,7 +2277,7 @@
+ q = phy->q_tx[qid];
+ spin_lock_bh(&q->lock);
+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
-+ td->tx_skb, q, tx_queued_limit);
++ td->tx_skb, q, qid, tx_queued_limit);
+ spin_unlock_bh(&q->lock);
+
+ return;
@@ -2293,7 +2294,7 @@
+ if (remain < tx_queued_limit)
+ tx_queued_limit = remain;
+
-+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, tx_queued_limit);
++ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
+
+ if (td->tx_pending % td->tx_count == 0 || is_mu)
+ td->cur_entry = list_next_entry(td->cur_entry, list);
@@ -2305,7 +2306,7 @@
}
static u32
-@@ -87,15 +126,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
+@@ -87,15 +127,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
}
static void
@@ -2341,7 +2342,7 @@
{
#define MT_TXP_MAX_LEN 4095
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
-@@ -117,7 +172,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+@@ -117,7 +173,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
nfrags = len / MT_TXP_MAX_LEN;
head_len = nfrags ? MT_TXP_MAX_LEN : len;
@@ -2351,7 +2352,7 @@
fc |= IEEE80211_STYPE_QOS_DATA;
head = alloc_skb(head_len, GFP_KERNEL);
-@@ -126,9 +182,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+@@ -126,9 +183,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
hdr = __skb_put_zero(head, head_len);
hdr->frame_control = cpu_to_le16(fc);
@@ -2364,7 +2365,7 @@
skb_set_queue_mapping(head, IEEE80211_AC_BE);
info = IEEE80211_SKB_CB(head);
-@@ -152,7 +208,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+@@ -152,7 +209,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
frag = alloc_skb(frag_len, GFP_KERNEL);
if (!frag) {
@@ -2373,7 +2374,7 @@
dev_kfree_skb(head);
return -ENOMEM;
}
-@@ -165,15 +221,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+@@ -165,15 +222,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
frag_tail = &(*frag_tail)->next;
}
@@ -2393,7 +2394,7 @@
{
struct mt76_testmode_data *td = &phy->test;
struct ieee80211_tx_info *info;
-@@ -181,7 +236,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
+@@ -181,7 +237,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
u8 max_nss = hweight8(phy->antenna_mask);
int ret;
@@ -2402,7 +2403,7 @@
if (ret)
return ret;
-@@ -191,7 +246,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
+@@ -191,7 +247,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
@@ -2411,7 +2412,7 @@
rate = &info->control.rates[0];
rate->count = 1;
rate->idx = td->tx_rate_idx;
-@@ -263,6 +318,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
+@@ -263,6 +319,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
out:
return 0;
}
@@ -2437,7 +2438,7 @@
static void
mt76_testmode_tx_start(struct mt76_phy *phy)
-@@ -273,6 +347,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
+@@ -273,6 +348,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
td->tx_queued = 0;
td->tx_done = 0;
td->tx_pending = td->tx_count;
@@ -2452,7 +2453,7 @@
mt76_worker_schedule(&dev->tx_worker);
}
-@@ -291,7 +373,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
+@@ -291,7 +374,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
MT76_TM_TIMEOUT * HZ);
@@ -2461,7 +2462,7 @@
}
static inline void
-@@ -322,6 +404,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
+@@ -322,6 +405,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
@@ -2470,7 +2471,7 @@
}
static int
-@@ -331,8 +415,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
+@@ -331,8 +416,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
struct mt76_dev *dev = phy->dev;
int err;
@@ -2484,7 +2485,7 @@
if (state == MT76_TM_STATE_TX_FRAMES) {
err = mt76_testmode_tx_init(phy);
-@@ -402,6 +490,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
+@@ -402,6 +491,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
return 0;
}
@@ -2529,7 +2530,7 @@
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
-@@ -425,6 +551,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -425,6 +552,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
@@ -2541,7 +2542,7 @@
if (tb[MT76_TM_ATTR_RESET]) {
mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
memset(td, 0, sizeof(*td));
-@@ -452,7 +583,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -452,7 +584,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
&td->tx_duty_cycle, 0, 99) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
@@ -2553,7 +2554,7 @@
goto out;
if (tb[MT76_TM_ATTR_TX_LENGTH]) {
-@@ -484,8 +618,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -484,8 +619,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (tb[MT76_TM_ATTR_TX_POWER]) {
struct nlattr *cur;
@@ -2563,7 +2564,7 @@
nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
if (nla_len(cur) != 1 ||
-@@ -505,11 +638,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -505,11 +639,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (nla_len(cur) != ETH_ALEN || idx >= 3)
goto out;
@@ -2610,7 +2611,7 @@
if (dev->test_ops->set_params) {
err = dev->test_ops->set_params(phy, tb, state);
if (err)
-@@ -574,6 +741,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+@@ -574,6 +742,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &phy->test;
@@ -2618,7 +2619,7 @@
struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
int err = 0;
void *a;
-@@ -606,6 +774,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+@@ -606,6 +775,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
}
@@ -2638,7 +2639,7 @@
mt76_testmode_init_defaults(phy);
err = -EMSGSIZE;
-@@ -618,12 +799,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+@@ -618,12 +800,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
@@ -2651,7 +2652,7 @@
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
-@@ -643,6 +820,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+@@ -643,6 +821,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
goto out;
@@ -2668,7 +2669,7 @@
a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
if (!a)
diff --git a/testmode.h b/testmode.h
-index 8961326..57949f2 100644
+index 89613266..57949f2b 100644
--- a/testmode.h
+++ b/testmode.h
@@ -6,6 +6,8 @@
@@ -2773,7 +2774,7 @@
+
#endif
diff --git a/tools/fields.c b/tools/fields.c
-index e3f6908..6e36ab2 100644
+index e3f69089..6e36ab27 100644
--- a/tools/fields.c
+++ b/tools/fields.c
@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
@@ -2899,7 +2900,7 @@
};
diff --git a/tx.c b/tx.c
-index 02067ed..0457c3e 100644
+index 6c8d50d3..ae44afe0 100644
--- a/tx.c
+++ b/tx.c
@@ -245,8 +245,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
diff --git a/recipes-kernel/linux-mt76/files/patches/1113-mt76-mt7915-drop-packets-when-TWT-stations-use-more-.patch b/recipes-kernel/linux-mt76/files/patches/1113-mt76-mt7915-drop-packets-when-TWT-stations-use-more-.patch
old mode 100644
new mode 100755
index e077acf..bb06dff
--- a/recipes-kernel/linux-mt76/files/patches/1113-mt76-mt7915-drop-packets-when-TWT-stations-use-more-.patch
+++ b/recipes-kernel/linux-mt76/files/patches/1113-mt76-mt7915-drop-packets-when-TWT-stations-use-more-.patch
@@ -1,19 +1,21 @@
-From 23de5ffe66f73e5fbd22a441f7ca1f612f1bd158 Mon Sep 17 00:00:00 2001
-From: Peter Chiu <chui-hao.chiu@mediatek.com>
-Date: Wed, 22 Jun 2022 10:58:37 +0800
-Subject: [PATCH 1113/1115] mt76: mt7915: drop packets when TWT stations use
- more tokens than 128
+From 9bc707077df60aa6423c89b1d17fd8a5f4e2d36b Mon Sep 17 00:00:00 2001
+From: Bo Jiao <Bo.Jiao@mediatek.com>
+Date: Mon, 4 Jul 2022 19:24:34 +0800
+Subject: [PATCH] mt76: mt7915: drop packets when TWT stations use
---
mt7915/mac.c | 21 ++++++++++++++++++---
mt7915/mt7915.h | 2 ++
2 files changed, 20 insertions(+), 3 deletions(-)
+ mode change 100644 => 100755 mt7915/mac.c
diff --git a/mt7915/mac.c b/mt7915/mac.c
-index 8fd4618..fd0dd50 100644
+old mode 100644
+new mode 100755
+index 5e5df23d..55b38d16
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
-@@ -724,6 +724,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+@@ -725,6 +725,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
@@ -21,7 +23,7 @@
struct mt76_connac_fw_txp *txp;
struct mt76_txwi_cache *t;
int id, i, nbuf = tx_info->nbuf - 1;
-@@ -737,8 +738,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+@@ -738,8 +739,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
wcid = &dev->mt76.global_wcid;
if (sta) {
@@ -30,7 +32,7 @@
msta = (struct mt7915_sta *)sta->drv_priv;
if (time_after(jiffies, msta->jiffies + HZ / 4)) {
-@@ -754,10 +753,22 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+@@ -755,10 +754,22 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mgmt->u.action.category == 0xff)
return -1;
@@ -51,9 +53,9 @@
+ spin_unlock_bh(&mdev->token_lock);
+
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key, 0);
-
-@@ -937,6 +948,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+ mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
+ qid, 0);
+@@ -939,6 +950,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
@@ -61,7 +63,7 @@
LIST_HEAD(free_list);
void *end = data + len;
bool v3, wake = false;
-@@ -960,7 +972,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+@@ -962,7 +974,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
if (info & MT_TX_FREE_PAIR) {
@@ -69,7 +71,7 @@
struct mt76_wcid *wcid;
u16 idx;
-@@ -993,6 +1004,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+@@ -995,6 +1006,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@@ -81,7 +83,7 @@
mt7915_txwi_free(dev, txwi, sta, &free_list);
}
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
-index ff92e55..6235014 100644
+index 7c7d6bd3..5f11b3fb 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -136,6 +136,8 @@ struct mt7915_sta {
diff --git a/recipes-kernel/linux-mt76/files/patches/1116-mt76-mt7915-add-vendor-dump-phy-capa.patch b/recipes-kernel/linux-mt76/files/patches/1116-mt76-mt7915-add-vendor-dump-phy-capa.patch
new file mode 100644
index 0000000..df0838e
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/patches/1116-mt76-mt7915-add-vendor-dump-phy-capa.patch
@@ -0,0 +1,130 @@
+Index: mt76-2022-06-24-b6e865e2/mt7915/vendor.c
+===================================================================
+--- mt76-2022-06-24-b6e865e2.orig/mt7915/vendor.c
++++ mt76-2022-06-24-b6e865e2/mt7915/vendor.c
+@@ -50,6 +50,18 @@ rfeature_ctrl_policy[NUM_MTK_VENDOR_ATTR
+ [MTK_VENDOR_ATTR_RFEATURE_CTRL_TRIG_TXBF] = { .type = NLA_U8 },
+ };
+
++static const struct nla_policy
++phy_capa_ctrl_policy[NUM_MTK_VENDOR_ATTRS_PHY_CAPA_CTRL] = {
++ [MTK_VENDOR_ATTR_PHY_CAPA_CTRL_SET] = { .type = NLA_NESTED },
++ [MTK_VENDOR_ATTR_PHY_CAPA_CTRL_DUMP] = { .type = NLA_NESTED },
++};
++
++static const struct nla_policy
++phy_capa_dump_policy[NUM_MTK_VENDOR_ATTRS_PHY_CAPA_DUMP] = {
++ [MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_BSS] = { .type = NLA_U16 },
++ [MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_STA] = { .type = NLA_U16 },
++};
++
+ struct csi_null_tone {
+ u8 start;
+ u8 end;
+@@ -974,6 +986,35 @@ static int mt7915_vendor_hemu_ctrl(struc
+ return 0;
+ }
+
++static int
++mt7915_vendor_phy_capa_ctrl_dump(struct wiphy *wiphy, struct wireless_dev *wdev,
++ struct sk_buff *skb, const void *data, int data_len,
++ unsigned long *storage)
++{
++ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
++ struct mt7915_phy *phy = mt7915_hw_phy(hw);
++ struct mt7915_dev *dev = phy->dev;
++ void *a;
++ int len = 0;
++
++ if (*storage == 1)
++ return -ENOENT;
++ *storage = 1;
++
++ a = nla_nest_start(skb, MTK_VENDOR_ATTR_PHY_CAPA_CTRL_DUMP);
++
++ if (nla_put_u16(skb,
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_BSS, MT7915_MAX_BSS) ||
++ nla_put_u16(skb,
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_STA, MT7915_WTBL_STA))
++ return -ENOMEM;
++ len += 2;
++
++ nla_nest_end(skb, a);
++
++ return len;
++}
++
+ static const struct wiphy_vendor_command mt7915_vendor_commands[] = {
+ {
+ .info = {
+@@ -1031,6 +1072,17 @@ static const struct wiphy_vendor_command
+ .doit = mt7915_vendor_hemu_ctrl,
+ .policy = hemu_ctrl_policy,
+ .maxattr = MTK_VENDOR_ATTR_HEMU_CTRL_MAX,
++ },
++ {
++ .info = {
++ .vendor_id = MTK_NL80211_VENDOR_ID,
++ .subcmd = MTK_NL80211_VENDOR_SUBCMD_PHY_CAPA_CTRL,
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
++ WIPHY_VENDOR_CMD_NEED_RUNNING,
++ .dumpit = mt7915_vendor_phy_capa_ctrl_dump,
++ .policy = phy_capa_ctrl_policy,
++ .maxattr = MTK_VENDOR_ATTR_PHY_CAPA_CTRL_MAX,
+ }
+ };
+
+Index: mt76-2022-06-24-b6e865e2/mt7915/vendor.h
+===================================================================
+--- mt76-2022-06-24-b6e865e2.orig/mt7915/vendor.h
++++ mt76-2022-06-24-b6e865e2/mt7915/vendor.h
+@@ -9,6 +9,7 @@ enum mtk_nl80211_vendor_subcmds {
+ MTK_NL80211_VENDOR_SUBCMD_RFEATURE_CTRL = 0xc3,
+ MTK_NL80211_VENDOR_SUBCMD_WIRELESS_CTRL = 0xc4,
+ MTK_NL80211_VENDOR_SUBCMD_HEMU_CTRL = 0xc5,
++ MTK_NL80211_VENDOR_SUBCMD_PHY_CAPA_CTRL = 0xc6,
+ };
+
+ enum mtk_capi_control_changed {
+@@ -149,4 +150,28 @@ enum mtk_vendor_attr_mnt_dump {
+ NUM_MTK_VENDOR_ATTRS_AMNT_DUMP - 1
+ };
+
++enum mtk_vendor_attr_phy_capa_ctrl {
++ MTK_VENDOR_ATTR_PHY_CAPA_CTRL_UNSPEC,
++
++ MTK_VENDOR_ATTR_PHY_CAPA_CTRL_SET,
++ MTK_VENDOR_ATTR_PHY_CAPA_CTRL_DUMP,
++
++ /* keep last */
++ NUM_MTK_VENDOR_ATTRS_PHY_CAPA_CTRL,
++ MTK_VENDOR_ATTR_PHY_CAPA_CTRL_MAX =
++ NUM_MTK_VENDOR_ATTRS_PHY_CAPA_CTRL - 1
++};
++
++enum mtk_vendor_attr_phy_capa_dump {
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_UNSPEC,
++
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_BSS,
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX_SUPPORTED_STA,
++
++ /* keep last */
++ NUM_MTK_VENDOR_ATTRS_PHY_CAPA_DUMP,
++ MTK_VENDOR_ATTR_PHY_CAPA_DUMP_MAX =
++ NUM_MTK_VENDOR_ATTRS_PHY_CAPA_DUMP - 1
++};
++
+ #endif
+Index: mt76-2022-06-24-b6e865e2/mt7915/mt7915.h
+===================================================================
+--- mt76-2022-06-24-b6e865e2.orig/mt7915/mt7915.h
++++ mt76-2022-06-24-b6e865e2/mt7915/mt7915.h
+@@ -11,6 +11,7 @@
+
+ #define MTK_DEBUG 1
+ #define MT7915_MAX_INTERFACES 19
++#define MT7915_MAX_BSS 16
+ #define MT7915_WTBL_SIZE 288
+ #define MT7916_WTBL_SIZE 544
+ #define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
diff --git a/recipes-kernel/linux-mt76/files/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch b/recipes-kernel/linux-mt76/files/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch
deleted file mode 100644
index 2ef225a..0000000
--- a/recipes-kernel/linux-mt76/files/patches/3000-mt76-remove-WED-support-patch-for-build-err.patch
+++ /dev/null
@@ -1,1123 +0,0 @@
-From f4838210b5e80adfa3af028721ee040edff79a48 Mon Sep 17 00:00:00 2001
-From: Sujuan Chen <sujuan.chen@mediatek.com>
-Date: Mon, 6 Jun 2022 20:22:35 +0800
-Subject: [PATCH] mt76:remove WED support patch for build err
-
----
- dma.c | 160 ++++++++++--------------------------------------
- mac80211.c | 4 +-
- mmio.c | 9 +--
- mt76.h | 25 ++------
- mt7603/dma.c | 8 +--
- mt7615/dma.c | 6 +-
- mt76x02_mmio.c | 4 +-
- mt7915/dma.c | 43 ++-----------
- mt7915/mac.c | 139 ++++++++++-------------------------------
- mt7915/mac.h | 2 -
- mt7915/main.c | 36 -----------
- mt7915/mcu.c | 3 -
- mt7915/mmio.c | 29 +++------
- mt7915/mt7915.h | 2 -
- mt7915/pci.c | 96 +++--------------------------
- mt7915/regs.h | 17 +----
- mt7921/dma.c | 2 +-
- tx.c | 16 +----
- 18 files changed, 105 insertions(+), 496 deletions(-)
-
-diff --git a/dma.c b/dma.c
-index f6f5f12..3f7456b 100644
---- a/dma.c
-+++ b/dma.c
-@@ -7,36 +7,9 @@
- #include "mt76.h"
- #include "dma.h"
-
--#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
--
--#define Q_READ(_dev, _q, _field) ({ \
-- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
-- u32 _val; \
-- if ((_q)->flags & MT_QFLAG_WED) \
-- _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
-- ((_q)->wed_regs + \
-- _offset)); \
-- else \
-- _val = readl(&(_q)->regs->_field); \
-- _val; \
--})
--
--#define Q_WRITE(_dev, _q, _field, _val) do { \
-- u32 _offset = offsetof(struct mt76_queue_regs, _field); \
-- if ((_q)->flags & MT_QFLAG_WED) \
-- mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
-- ((_q)->wed_regs + _offset), \
-- _val); \
-- else \
-- writel(_val, &(_q)->regs->_field); \
--} while (0)
--
--#else
--
--#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
--#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
-+#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
-+#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
-
--#endif
-
- static struct mt76_txwi_cache *
- mt76_alloc_txwi(struct mt76_dev *dev)
-@@ -138,6 +111,36 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
- mt76_dma_sync_idx(dev, q);
- }
-
-+static int
-+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-+ int idx, int n_desc, int bufsize,
-+ u32 ring_base)
-+{
-+ int size;
-+
-+ spin_lock_init(&q->lock);
-+ spin_lock_init(&q->cleanup_lock);
-+
-+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
-+ q->ndesc = n_desc;
-+ q->buf_size = bufsize;
-+ q->hw_idx = idx;
-+
-+ size = q->ndesc * sizeof(struct mt76_desc);
-+ q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
-+ if (!q->desc)
-+ return -ENOMEM;
-+
-+ size = q->ndesc * sizeof(*q->entry);
-+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
-+ if (!q->entry)
-+ return -ENOMEM;
-+
-+ mt76_dma_queue_reset(dev, q);
-+
-+ return 0;
-+}
-+
- static int
- mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_buf *buf, int nbufs, u32 info,
-@@ -482,85 +485,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
- return frames;
- }
-
--static int
--mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
--{
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-- struct mtk_wed_device *wed = &dev->mmio.wed;
-- int ret, type, ring;
-- u8 flags = q->flags;
--
-- if (!mtk_wed_device_active(wed))
-- q->flags &= ~MT_QFLAG_WED;
--
-- if (!(q->flags & MT_QFLAG_WED))
-- return 0;
--
-- type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
-- ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
--
-- switch (type) {
-- case MT76_WED_Q_TX:
-- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
-- if (!ret)
-- q->wed_regs = wed->tx_ring[ring].reg_base;
-- break;
-- case MT76_WED_Q_TXFREE:
-- /* WED txfree queue needs ring to be initialized before setup */
-- q->flags = 0;
-- mt76_dma_queue_reset(dev, q);
-- mt76_dma_rx_fill(dev, q);
-- q->flags = flags;
--
-- ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
-- if (!ret)
-- q->wed_regs = wed->txfree_ring.reg_base;
-- break;
-- default:
-- ret = -EINVAL;
-- }
--
-- return ret;
--#else
-- return 0;
--#endif
--}
--
--static int
--mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
-- int idx, int n_desc, int bufsize,
-- u32 ring_base)
--{
-- int ret, size;
--
-- spin_lock_init(&q->lock);
-- spin_lock_init(&q->cleanup_lock);
--
-- q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
-- q->ndesc = n_desc;
-- q->buf_size = bufsize;
-- q->hw_idx = idx;
--
-- size = q->ndesc * sizeof(struct mt76_desc);
-- q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
-- if (!q->desc)
-- return -ENOMEM;
--
-- size = q->ndesc * sizeof(*q->entry);
-- q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
-- if (!q->entry)
-- return -ENOMEM;
--
-- ret = mt76_dma_wed_setup(dev, q);
-- if (ret)
-- return ret;
--
-- if (q->flags != MT_WED_Q_TXFREE)
-- mt76_dma_queue_reset(dev, q);
--
-- return 0;
--}
--
- static void
- mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
- {
-@@ -642,29 +566,14 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
- static int
- mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
- {
-- int len, data_len, done = 0, dma_idx;
-+ int len, data_len, done = 0;
- struct sk_buff *skb;
- unsigned char *data;
-- bool check_ddone = false;
- bool more;
-
-- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
-- q->flags == MT_WED_Q_TXFREE) {
-- dma_idx = Q_READ(dev, q, dma_idx);
-- check_ddone = true;
-- }
--
- while (done < budget) {
- u32 info;
-
-- if (check_ddone) {
-- if (q->tail == dma_idx)
-- dma_idx = Q_READ(dev, q, dma_idx);
--
-- if (q->tail == dma_idx)
-- break;
-- }
--
- data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
- if (!data)
- break;
-@@ -805,8 +714,5 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
- }
-
- mt76_free_pending_txwi(dev);
--
-- if (mtk_wed_device_active(&dev->mmio.wed))
-- mtk_wed_device_detach(&dev->mmio.wed);
- }
- EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
-diff --git a/mac80211.c b/mac80211.c
-index 5600a09..7878446 100644
---- a/mac80211.c
-+++ b/mac80211.c
-@@ -1605,7 +1605,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
-
- struct mt76_queue *
- mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
-- int ring_base, u32 flags)
-+ int ring_base)
- {
- struct mt76_queue *hwq;
- int err;
-@@ -1614,8 +1614,6 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- if (!hwq)
- return ERR_PTR(-ENOMEM);
-
-- hwq->flags = flags;
--
- err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
- if (err < 0)
- return ERR_PTR(err);
-diff --git a/mmio.c b/mmio.c
-index 86e3d2a..26353b6 100644
---- a/mmio.c
-+++ b/mmio.c
-@@ -73,13 +73,8 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
- spin_lock_irqsave(&dev->mmio.irq_lock, flags);
- dev->mmio.irqmask &= ~clear;
- dev->mmio.irqmask |= set;
-- if (addr) {
-- if (mtk_wed_device_active(&dev->mmio.wed))
-- mtk_wed_device_irq_set_mask(&dev->mmio.wed,
-- dev->mmio.irqmask);
-- else
-- mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
-- }
-+ if (addr)
-+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
- spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
- }
- EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
-diff --git a/mt76.h b/mt76.h
-index 062c5ce..ed1924c 100644
---- a/mt76.h
-+++ b/mt76.h
-@@ -13,7 +13,6 @@
- #include <linux/leds.h>
- #include <linux/usb.h>
- #include <linux/average.h>
--#include <linux/soc/mediatek/mtk_wed.h>
- #include <net/mac80211.h>
- #include "util.h"
- #include "testmode.h"
-@@ -27,16 +26,6 @@
-
- #define MT76_TOKEN_FREE_THR 64
-
--#define MT_QFLAG_WED_RING GENMASK(1, 0)
--#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
--#define MT_QFLAG_WED BIT(4)
--
--#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
-- FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
-- FIELD_PREP(MT_QFLAG_WED_RING, _n))
--#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
--#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
--
- struct mt76_dev;
- struct mt76_phy;
- struct mt76_wcid;
-@@ -186,9 +175,6 @@ struct mt76_queue {
- u8 buf_offset;
- u8 hw_idx;
- u8 qid;
-- u8 flags;
--
-- u32 wed_regs;
-
- dma_addr_t desc_dma;
- struct sk_buff *rx_head;
-@@ -556,8 +542,6 @@ struct mt76_mmio {
- void __iomem *regs;
- spinlock_t irq_lock;
- u32 irqmask;
--
-- struct mtk_wed_device wed;
- };
-
- struct mt76_rx_status {
-@@ -782,7 +766,6 @@ struct mt76_dev {
-
- spinlock_t token_lock;
- struct idr token;
-- u16 wed_token_count;
- u16 token_count;
- u16 token_size;
-
-@@ -1008,14 +991,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
-
- struct mt76_queue *
- mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
-- int ring_base, u32 flags);
-+ int ring_base);
- u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
- static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
-- int n_desc, int ring_base, u32 flags)
-+ int n_desc, int ring_base)
- {
- struct mt76_queue *q;
-
-- q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
-+ q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
- if (IS_ERR(q))
- return PTR_ERR(q);
-
-@@ -1030,7 +1013,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
- {
- struct mt76_queue *q;
-
-- q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
-+ q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
- if (IS_ERR(q))
- return PTR_ERR(q);
-
-diff --git a/mt7603/dma.c b/mt7603/dma.c
-index 590cff9..37b092e 100644
---- a/mt7603/dma.c
-+++ b/mt7603/dma.c
-@@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
-
- for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
-- MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
-+ MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
- if (ret)
- return ret;
- }
-
- ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
-- MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
-+ MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
- if (ret)
- return ret;
-
-@@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
- return ret;
-
- ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
-- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
-+ MT_MCU_RING_SIZE, MT_TX_RING_BASE);
- if (ret)
- return ret;
-
- ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
-- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
-+ MT_MCU_RING_SIZE, MT_TX_RING_BASE);
- if (ret)
- return ret;
-
-diff --git a/mt7615/dma.c b/mt7615/dma.c
-index 3a79a2d..00aefea 100644
---- a/mt7615/dma.c
-+++ b/mt7615/dma.c
-@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
- for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
- MT7615_TX_RING_SIZE / 2,
-- MT_TX_RING_BASE, 0);
-+ MT_TX_RING_BASE);
- if (ret)
- return ret;
- }
-
- ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
- MT7615_TX_MGMT_RING_SIZE,
-- MT_TX_RING_BASE, 0);
-+ MT_TX_RING_BASE);
- if (ret)
- return ret;
-
-@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
- return mt7622_init_tx_queues_multi(dev);
-
- ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
-- MT_TX_RING_BASE, 0);
-+ MT_TX_RING_BASE);
- if (ret)
- return ret;
-
-diff --git a/mt76x02_mmio.c b/mt76x02_mmio.c
-index 0fa3c7c..8bcd8af 100644
---- a/mt76x02_mmio.c
-+++ b/mt76x02_mmio.c
-@@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
- MT76x02_TX_RING_SIZE,
-- MT_TX_RING_BASE, 0);
-+ MT_TX_RING_BASE);
- if (ret)
- return ret;
- }
-
- ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
-- MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
-+ MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
- if (ret)
- return ret;
-
-diff --git a/mt7915/dma.c b/mt7915/dma.c
-index 9e3d14d..4358e9b 100644
---- a/mt7915/dma.c
-+++ b/mt7915/dma.c
-@@ -8,16 +8,9 @@
- static int
- mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
- {
-- struct mt7915_dev *dev = phy->dev;
- int i, err;
-
-- if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
-- ring_base = MT_WED_TX_RING_BASE;
-- idx -= MT_TXQ_ID(0);
-- }
--
-- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
-- MT_WED_Q_TX(idx));
-+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
- if (err < 0)
- return err;
-
-@@ -326,14 +319,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
- if (dev->dbdc_support || dev->phy.band_idx)
- irq_mask |= MT_INT_BAND1_RX_DONE;
-
-- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
-- u32 wed_irq_mask = irq_mask;
--
-- wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
-- mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
-- mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
-- }
--
- mt7915_irq_enable(dev, irq_mask);
-
- return 0;
-@@ -342,7 +327,6 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
- int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
- {
- struct mt76_dev *mdev = &dev->mt76;
-- u32 wa_rx_base, wa_rx_idx;
- u32 hif1_ofs = 0;
- int ret;
-
-@@ -355,17 +339,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
-
- mt7915_dma_disable(dev, true);
-
-- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
-- mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
--
-- mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
-- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
-- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
-- FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
-- } else {
-- mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
-- }
--
- /* init tx queue */
- ret = mt7915_init_tx_queues(&dev->phy,
- MT_TXQ_ID(dev->phy.band_idx),
-@@ -417,17 +390,11 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
- return ret;
-
- /* event from WA */
-- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
-- wa_rx_base = MT_WED_RX_RING_BASE;
-- wa_rx_idx = MT7915_RXQ_MCU_WA;
-- dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
-- } else {
-- wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
-- wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
-- }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
-- wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
-- MT_RX_BUF_SIZE, wa_rx_base);
-+ MT_RXQ_ID(MT_RXQ_MCU_WA),
-+ MT7915_RX_MCU_RING_SIZE,
-+ MT_RX_BUF_SIZE,
-+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
- if (ret)
- return ret;
-
-diff --git a/mt7915/mac.c b/mt7915/mac.c
-index fd0dd50..1bf3b41 100644
---- a/mt7915/mac.c
-+++ b/mt7915/mac.c
-@@ -815,29 +815,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- return 0;
- }
-
--u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
--{
-- struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
-- __le32 *txwi = ptr;
-- u32 val;
--
-- memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
--
-- val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
-- FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
-- txwi[0] = cpu_to_le32(val);
--
-- val = MT_TXD1_LONG_FORMAT |
-- FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
-- txwi[1] = cpu_to_le32(val);
--
-- txp->token = cpu_to_le16(token_id);
-- txp->nbuf = 1;
-- txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
--
-- return MT_TXD_SIZE + sizeof(*txp);
--}
--
- static void
- mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
- {
-@@ -863,12 +840,23 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
- ieee80211_start_tx_ba_session(sta, tid, 0);
- }
-
-+/* static void */
-+/* mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t) */
-+/* { */
-+/* struct mt76_connac_fw_txp *txp; */
-+/* int i; */
-+
-+/* txp = mt76_connac_txwi_to_txp(dev, t); */
-+/* for (i = 0; i < txp->nbuf; i++) */
-+/* dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), */
-+/* le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); */
-+/* } */
-+
- static void
- mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, struct list_head *free_list)
- {
- struct mt76_dev *mdev = &dev->mt76;
-- struct mt7915_sta *msta;
- struct mt76_wcid *wcid;
- __le32 *txwi;
- u16 wcid_idx;
-@@ -881,24 +869,13 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
- if (sta) {
- wcid = (struct mt76_wcid *)sta->drv_priv;
- wcid_idx = wcid->idx;
-+
-+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
-+ mt7915_tx_check_aggr(sta, txwi);
- } else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
-- wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
--
-- if (wcid && wcid->sta) {
-- msta = container_of(wcid, struct mt7915_sta, wcid);
-- sta = container_of((void *)msta, struct ieee80211_sta,
-- drv_priv);
-- spin_lock_bh(&dev->sta_poll_lock);
-- if (list_empty(&msta->poll_list))
-- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
-- spin_unlock_bh(&dev->sta_poll_lock);
-- }
- }
-
-- if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
-- mt7915_tx_check_aggr(sta, txwi);
--
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-
- out:
-@@ -906,57 +883,31 @@ out:
- mt76_put_txwi(mdev, t);
- }
-
--static void
--mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
--{
-- struct mt76_dev *mdev = &dev->mt76;
-- struct mt76_phy *mphy_ext = mdev->phy2;
--
-- /* clean DMA queues and unmap buffers first */
-- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
-- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-- if (mphy_ext) {
-- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
-- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
-- }
--}
--
--static void
--mt7915_mac_tx_free_done(struct mt7915_dev *dev,
-- struct list_head *free_list, bool wake)
--{
-- struct sk_buff *skb, *tmp;
--
-- mt7915_mac_sta_poll(dev);
--
-- if (wake)
-- mt76_set_tx_blocked(&dev->mt76, false);
--
-- mt76_worker_schedule(&dev->mt76.tx_worker);
--
-- list_for_each_entry_safe(skb, tmp, free_list, list) {
-- skb_list_del_init(skb);
-- napi_consume_skb(skb, 1);
-- }
--}
--
- static void
- mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
- {
- struct mt76_connac_tx_free *free = data;
- __le32 *tx_info = (__le32 *)(data + sizeof(*free));
- struct mt76_dev *mdev = &dev->mt76;
-+ struct mt76_phy *mphy_ext = mdev->phy2;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- struct mt7915_sta *msta = NULL;
- LIST_HEAD(free_list);
-+ struct sk_buff *skb, *tmp;
- void *end = data + len;
- bool v3, wake = false;
- u16 total, count = 0;
- u32 txd = le32_to_cpu(free->txd);
- __le32 *cur_info;
-
-- mt7915_mac_tx_free_prepare(dev);
-+ /* clean DMA queues and unmap buffers first */
-+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
-+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-+ if (mphy_ext) {
-+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
-+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
-+ }
-
- total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
- v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
-@@ -1013,38 +964,17 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
- }
- }
-
-- mt7915_mac_tx_free_done(dev, &free_list, wake);
--}
--
--static void
--mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
--{
-- struct mt76_connac_tx_free *free = data;
-- __le16 *info = (__le16 *)(data + sizeof(*free));
-- struct mt76_dev *mdev = &dev->mt76;
-- void *end = data + len;
-- LIST_HEAD(free_list);
-- bool wake = false;
-- u8 i, count;
--
-- mt7915_mac_tx_free_prepare(dev);
--
-- count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
-- if (WARN_ON_ONCE((void *)&info[count] > end))
-- return;
-+ mt7915_mac_sta_poll(dev);
-
-- for (i = 0; i < count; i++) {
-- struct mt76_txwi_cache *txwi;
-- u16 msdu = le16_to_cpu(info[i]);
-+ if (wake)
-+ mt76_set_tx_blocked(&dev->mt76, false);
-
-- txwi = mt76_token_release(mdev, msdu, &wake);
-- if (!txwi)
-- continue;
-+ mt76_worker_schedule(&dev->mt76.tx_worker);
-
-- mt7915_txwi_free(dev, txwi, NULL, &free_list);
-+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
-+ skb_list_del_init(skb);
-+ napi_consume_skb(skb, 1);
- }
--
-- mt7915_mac_tx_free_done(dev, &free_list, wake);
- }
-
- static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
-@@ -1102,9 +1032,6 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
- case PKT_TYPE_TXRX_NOTIFY:
- mt7915_mac_tx_free(dev, data, len);
- return false;
-- case PKT_TYPE_TXRX_NOTIFY_V0:
-- mt7915_mac_tx_free_v0(dev, data, len);
-- return false;
- case PKT_TYPE_TXS:
- for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
-@@ -1132,10 +1059,6 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- mt7915_mac_tx_free(dev, skb->data, skb->len);
- napi_consume_skb(skb, 1);
- break;
-- case PKT_TYPE_TXRX_NOTIFY_V0:
-- mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
-- napi_consume_skb(skb, 1);
-- break;
- case PKT_TYPE_RX_EVENT:
- mt7915_mcu_rx_event(dev, skb);
- break;
-diff --git a/mt7915/mac.h b/mt7915/mac.h
-index 6fa9c79..9986c03 100644
---- a/mt7915/mac.h
-+++ b/mt7915/mac.h
-@@ -26,12 +26,10 @@ enum rx_pkt_type {
- PKT_TYPE_TXRX_NOTIFY,
- PKT_TYPE_RX_EVENT,
- PKT_TYPE_RX_FW_MONITOR = 0x0c,
-- PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
- };
-
- #define MT_TX_FREE_VER GENMASK(18, 16)
- #define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
--#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
- #define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
- #define MT_TX_FREE_LATENCY GENMASK(12, 0)
- /* 0: success, others: dropped */
-diff --git a/mt7915/main.c b/mt7915/main.c
-index ebff255..79127b4 100644
---- a/mt7915/main.c
-+++ b/mt7915/main.c
-@@ -1422,39 +1422,6 @@ out:
- return ret;
- }
-
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
--static int
--mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
-- struct ieee80211_vif *vif,
-- struct ieee80211_sta *sta,
-- struct net_device_path_ctx *ctx,
-- struct net_device_path *path)
--{
-- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-- struct mt7915_dev *dev = mt7915_hw_dev(hw);
-- struct mt7915_phy *phy = mt7915_hw_phy(hw);
-- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
--
-- if (!mtk_wed_device_active(wed))
-- return -ENODEV;
--
-- if (msta->wcid.idx > 0xff)
-- return -EIO;
--
-- path->type = DEV_PATH_MTK_WDMA;
-- path->dev = ctx->dev;
-- path->mtk_wdma.wdma_idx = wed->wdma_idx;
-- path->mtk_wdma.bss = mvif->mt76.idx;
-- path->mtk_wdma.wcid = msta->wcid.idx;
-- path->mtk_wdma.queue = phy != &dev->phy;
--
-- ctx->dev = NULL;
--
-- return 0;
--}
--#endif
--
- const struct ieee80211_ops mt7915_ops = {
- .tx = mt7915_tx,
- .start = mt7915_start,
-@@ -1502,7 +1469,4 @@ const struct ieee80211_ops mt7915_ops = {
- .sta_add_debugfs = mt7915_sta_add_debugfs,
- #endif
- .set_radar_background = mt7915_set_radar_background,
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-- .net_fill_forward_path = mt7915_net_fill_forward_path,
--#endif
- };
-diff --git a/mt7915/mcu.c b/mt7915/mcu.c
-index a16081d..46eef36 100644
---- a/mt7915/mcu.c
-+++ b/mt7915/mcu.c
-@@ -2365,9 +2365,6 @@ int mt7915_run_firmware(struct mt7915_dev *dev)
- if (ret)
- return ret;
-
-- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
-- mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
--
- ret = mt7915_mcu_set_mwds(dev, 1);
- if (ret)
- return ret;
-diff --git a/mt7915/mmio.c b/mt7915/mmio.c
-index a84970d..1f58b2f 100644
---- a/mt7915/mmio.c
-+++ b/mt7915/mmio.c
-@@ -560,21 +560,15 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
- static void mt7915_irq_tasklet(struct tasklet_struct *t)
- {
- struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
-- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- u32 intr, intr1, mask;
-
-- if (mtk_wed_device_active(wed)) {
-- mtk_wed_device_irq_set_mask(wed, 0);
-- intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
-- } else {
-- mt76_wr(dev, MT_INT_MASK_CSR, 0);
-- if (dev->hif2)
-- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
-+ if (dev->hif2)
-+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
-- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
-- intr &= dev->mt76.mmio.irqmask;
-- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-- }
-+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
-+ intr &= dev->mt76.mmio.irqmask;
-+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
-@@ -628,15 +622,10 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
- irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
- {
- struct mt7915_dev *dev = dev_instance;
-- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
-
-- if (mtk_wed_device_active(wed)) {
-- mtk_wed_device_irq_set_mask(wed, 0);
-- } else {
-- mt76_wr(dev, MT_INT_MASK_CSR, 0);
-- if (dev->hif2)
-- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-- }
-+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
-+ if (dev->hif2)
-+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
-diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
-index 6235014..ca95948 100644
---- a/mt7915/mt7915.h
-+++ b/mt7915/mt7915.h
-@@ -527,8 +527,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
- void mt7915_wfsys_reset(struct mt7915_dev *dev);
- irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
- u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
--u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
--
- int mt7915_register_device(struct mt7915_dev *dev);
- void mt7915_unregister_device(struct mt7915_dev *dev);
- int mt7915_eeprom_init(struct mt7915_dev *dev);
-diff --git a/mt7915/pci.c b/mt7915/pci.c
-index d74f609..7cea49f 100644
---- a/mt7915/pci.c
-+++ b/mt7915/pci.c
-@@ -12,9 +12,6 @@
- #include "mac.h"
- #include "../trace.h"
-
--static bool wed_enable = false;
--module_param(wed_enable, bool, 0644);
--
- static LIST_HEAD(hif_list);
- static DEFINE_SPINLOCK(hif_lock);
- static u32 hif_idx;
-@@ -95,79 +92,12 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
- return 0;
- }
-
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
--static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
--{
-- struct mt7915_dev *dev;
-- int ret;
--
-- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
--
-- spin_lock_bh(&dev->mt76.token_lock);
-- dev->mt76.token_size = wed->wlan.token_start;
-- spin_unlock_bh(&dev->mt76.token_lock);
--
-- ret = wait_event_timeout(dev->mt76.tx_wait,
-- !dev->mt76.wed_token_count, HZ);
-- if (!ret)
-- return -EAGAIN;
--
-- return 0;
--}
--
--static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
--{
-- struct mt7915_dev *dev;
--
-- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
--
-- spin_lock_bh(&dev->mt76.token_lock);
-- dev->mt76.token_size = MT7915_TOKEN_SIZE;
-- spin_unlock_bh(&dev->mt76.token_lock);
--}
--#endif
--
--static int
--mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
--{
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
-- int ret;
--
-- if (!wed_enable)
-- return 0;
--
-- wed->wlan.pci_dev = pdev;
-- wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
-- MT_WFDMA_EXT_CSR_BASE;
-- wed->wlan.nbuf = 4096;
-- wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
-- wed->wlan.init_buf = mt7915_wed_init_buf;
-- wed->wlan.offload_enable = mt7915_wed_offload_enable;
-- wed->wlan.offload_disable = mt7915_wed_offload_disable;
--
-- if (mtk_wed_device_attach(wed) != 0)
-- return 0;
--
-- *irq = wed->irq;
-- dev->mt76.dma_dev = wed->dev;
--
-- ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
-- if (ret)
-- return ret;
--
-- return 1;
--#else
-- return 0;
--#endif
--}
--
- static int mt7915_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
- {
-- struct mt7915_hif *hif2 = NULL;
- struct mt7915_dev *dev;
- struct mt76_dev *mdev;
-+ struct mt7915_hif *hif2;
- int irq;
- int ret;
-
-@@ -199,24 +129,15 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
- mt7915_wfsys_reset(dev);
- hif2 = mt7915_pci_init_hif2(pdev);
-
-- ret = mt7915_pci_wed_init(dev, pdev, &irq);
-+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
-- goto free_wed_or_irq_vector;
--
-- if (!ret) {
-- hif2 = mt7915_pci_init_hif2(pdev);
--
-- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
-- if (ret < 0)
-- goto free_device;
--
-- irq = pdev->irq;
-- }
-+ goto free_device;
-
-+ irq = pdev->irq;
- ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
-- goto free_wed_or_irq_vector;
-+ goto free_irq_vector;
-
- /* master switch of PCIe tnterrupt enable */
- mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
-@@ -251,11 +172,8 @@ free_hif2:
- if (dev->hif2)
- put_device(dev->hif2->dev);
- devm_free_irq(mdev->dev, irq, dev);
--free_wed_or_irq_vector:
-- if (mtk_wed_device_active(&mdev->mmio.wed))
-- mtk_wed_device_detach(&mdev->mmio.wed);
-- else
-- pci_free_irq_vectors(pdev);
-+free_irq_vector:
-+ pci_free_irq_vectors(pdev);
- free_device:
- mt76_free_device(&dev->mt76);
-
-diff --git a/mt7915/regs.h b/mt7915/regs.h
-index 444440e..1e7fbce 100644
---- a/mt7915/regs.h
-+++ b/mt7915/regs.h
-@@ -603,31 +603,18 @@ enum offs_rev {
-
- /* WFDMA CSR */
- #define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
--#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
- #define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
--#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
-
--#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
-+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
- #define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
--#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
-
--#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
--#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
--#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
--#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
--
--#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
-+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
- #define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
-
- #define MT_PCIE_RECOG_ID 0xd7090
- #define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
- #define MT_PCIE_RECOG_ID_SEM BIT(31)
-
--#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
--
--#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
--#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
--
- /* WFDMA0 PCIE1 */
- #define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
- #define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
-diff --git a/mt7921/dma.c b/mt7921/dma.c
-index 2939cf9..ca7e20f 100644
---- a/mt7921/dma.c
-+++ b/mt7921/dma.c
-@@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
- {
- int i, err;
-
-- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
-+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
- if (err < 0)
- return err;
-
-diff --git a/tx.c b/tx.c
-index 0457c3e..656b709 100644
---- a/tx.c
-+++ b/tx.c
-@@ -725,12 +725,6 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
- if (token >= 0)
- dev->token_count++;
-
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-- if (mtk_wed_device_active(&dev->mmio.wed) &&
-- token >= dev->mmio.wed.wlan.token_start)
-- dev->wed_token_count++;
--#endif
--
- if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
- __mt76_set_tx_blocked(dev, true);
-
-@@ -748,17 +742,9 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
- spin_lock_bh(&dev->token_lock);
-
- txwi = idr_remove(&dev->token, token);
-- if (txwi) {
-+ if (txwi)
- dev->token_count--;
-
--#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-- if (mtk_wed_device_active(&dev->mmio.wed) &&
-- token >= dev->mmio.wed.wlan.token_start &&
-- --dev->wed_token_count == 0)
-- wake_up(&dev->tx_wait);
--#endif
-- }
--
- if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
- dev->phy.q_tx[0]->blocked)
- *wake = true;
---
-2.18.0
-
diff --git a/recipes-kernel/linux-mt76/files/patches/3001-mt76-add-wed-tx-support.patch b/recipes-kernel/linux-mt76/files/patches/3001-mt76-add-wed-tx-support.patch
new file mode 100755
index 0000000..e83baf5
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/patches/3001-mt76-add-wed-tx-support.patch
@@ -0,0 +1,554 @@
+From c5d0d7fb936620a3737fe5b71c1f59170ba42674 Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Sun, 12 Jun 2022 16:38:45 +0800
+Subject: [PATCH 1/3] mt76 add wed tx support
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ mt76_connac.h | 1 +
+ mt7915/dma.c | 59 +++++++++++++++++++-------
+ mt7915/mac.c | 4 +-
+ mt7915/mac.h | 0
+ mt7915/main.c | 9 +++-
+ mt7915/mcu.c | 2 +-
+ mt7915/mmio.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++-
+ mt7915/mt7915.h | 2 +
+ mt7915/pci.c | 72 +------------------------------
+ mt7915/regs.h | 15 +++++++
+ mt7915/soc.c | 16 +++++--
+ 11 files changed, 193 insertions(+), 97 deletions(-)
+ mode change 100644 => 100755 mt76_connac.h
+ mode change 100644 => 100755 mt7915/mac.h
+ mode change 100644 => 100755 mt7915/mmio.c
+
+diff --git a/mt76_connac.h b/mt76_connac.h
+old mode 100644
+new mode 100755
+index 1d32d55b..3c493014
+--- a/mt76_connac.h
++++ b/mt76_connac.h
+@@ -110,6 +110,7 @@ struct mt76_connac_sta_key_conf {
+ };
+
+ #define MT_TXP_MAX_BUF_NUM 6
++#define MT_TXD_TXP_BUF_SIZE 128
+
+ struct mt76_connac_fw_txp {
+ __le16 flags;
+diff --git a/mt7915/dma.c b/mt7915/dma.c
+index 9e3d14db..71223221 100644
+--- a/mt7915/dma.c
++++ b/mt7915/dma.c
+@@ -12,7 +12,10 @@ mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base
+ int i, err;
+
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+- ring_base = MT_WED_TX_RING_BASE;
++		if (!is_mt7986(&dev->mt76))
++ ring_base = MT_WED_TX_RING_BASE;
++ else
++ ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+ idx -= MT_TXQ_ID(0);
+ }
+
+@@ -74,14 +77,23 @@ static void mt7915_dma_config(struct mt7915_dev *dev)
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ } else {
+- RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
++		if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
++ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916, MT7916_RXQ_MCU_WA);
++ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_WED_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
++ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
++ TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0, MT7915_TXQ_BAND0);
++ TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1, MT7915_TXQ_BAND1);
++ } else {
++ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
++ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
++ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
++ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
++ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
++ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
++ }
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
+- RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
+- RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
+- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+@@ -330,7 +342,9 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
+ u32 wed_irq_mask = irq_mask;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+- mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
++ if (!is_mt7986(&dev->mt76))
++ mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
++ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ }
+
+@@ -355,15 +369,19 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+
+ mt7915_dma_disable(dev, true);
+
+- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && !is_mt7986(mdev)) {
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+-
++		if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
+- } else {
+- mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
++ } else {
++ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
++ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
++ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
++ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 2));
++ }
+ }
+
+ /* init tx queue */
+@@ -417,7 +435,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ return ret;
+
+ /* event from WA */
+- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(mdev)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+@@ -444,11 +462,20 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
++ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
++ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);
++
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
++ dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
++ if (is_mt7916(mdev)) {
++ wa_rx_base = MT_WED_RX_RING_BASE;
++ wa_rx_idx = MT7915_RXQ_MCU_WA;
++ }
++ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+- MT_RXQ_ID(MT_RXQ_MAIN_WA),
++ wa_rx_idx,
+ MT7915_RX_MCU_RING_SIZE,
+- MT_RX_BUF_SIZE,
+- MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
++ MT_RX_BUF_SIZE, wa_rx_base);
+ if (ret)
+ return ret;
+ }
+diff --git a/mt7915/mac.c b/mt7915/mac.c
+index fd0dd509..3f059bed 100644
+--- a/mt7915/mac.c
++++ b/mt7915/mac.c
+@@ -833,9 +833,9 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+- txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
++ txp->buf[0] = cpu_to_le32(phys + MT_TXD_TXP_BUF_SIZE);
+
+- return MT_TXD_SIZE + sizeof(*txp);
++ return MT_TXD_TXP_BUF_SIZE;
+ }
+
+ static void
+diff --git a/mt7915/mac.h b/mt7915/mac.h
+old mode 100644
+new mode 100755
+diff --git a/mt7915/main.c b/mt7915/main.c
+index ebff255f..f1396eed 100644
+--- a/mt7915/main.c
++++ b/mt7915/main.c
+@@ -1439,14 +1439,19 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+- if (msta->wcid.idx > 0xff)
++ if (msta->wcid.idx > MT7915_WTBL_STA)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+- path->mtk_wdma.wcid = msta->wcid.idx;
++ /* fw will find the wcid by dest addr */
++	if (is_mt7915(&dev->mt76))
++ path->mtk_wdma.wcid = 0xff;
++ else
++ path->mtk_wdma.wcid = 0x3ff;
++
+ path->mtk_wdma.queue = phy != &dev->phy;
+
+ ctx->dev = NULL;
+diff --git a/mt7915/mcu.c b/mt7915/mcu.c
+index 3344e122..9d2a7059 100644
+--- a/mt7915/mcu.c
++++ b/mt7915/mcu.c
+@@ -2362,7 +2362,7 @@ int mt7915_run_firmware(struct mt7915_dev *dev)
+ if (ret)
+ return ret;
+
+- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(&dev->mt76))
+ mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
+
+ ret = mt7915_mcu_set_mwds(dev, 1);
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+old mode 100644
+new mode 100755
+index 6d1dbdbd..b4a3120d
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -10,6 +10,9 @@
+ #include "mac.h"
+ #include "../trace.h"
+
++static bool wed_enable = true;
++module_param(wed_enable, bool, 0644);
++
+ static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+@@ -541,7 +544,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
++ if (mtk_wed_device_active(&mdev->mmio.wed))
++ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
++ mdev->mmio.irqmask);
++ else
++ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+@@ -565,6 +572,8 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
+
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
++ if (dev->hif2)
++ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+@@ -646,6 +655,105 @@ irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+ return IRQ_HANDLED;
+ }
+
++#ifdef CONFIG_NET_MEDIATEK_SOC_WED
++static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
++{
++ struct mt7915_dev *dev;
++ int ret;
++
++ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
++
++ spin_lock_bh(&dev->mt76.token_lock);
++ dev->mt76.token_size = wed->wlan.token_start;
++ spin_unlock_bh(&dev->mt76.token_lock);
++
++ ret = wait_event_timeout(dev->mt76.tx_wait,
++ !dev->mt76.wed_token_count, HZ);
++ if (!ret)
++ return -EAGAIN;
++
++ return 0;
++}
++
++static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
++{
++ struct mt7915_dev *dev;
++
++ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
++
++ spin_lock_bh(&dev->mt76.token_lock);
++	dev->mt76.token_size = wed->wlan.token_start;
++ spin_unlock_bh(&dev->mt76.token_lock);
++}
++#endif
++
++int
++mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
++{
++#ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ struct mt76_dev *mdev = &dev->mt76;
++ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
++ u32 base;
++ int ret;
++
++ if (!wed_enable)
++ return 0;
++
++ if (dev_is_pci(pdev)) {
++ struct pci_dev *pci_dev;
++
++ pci_dev = container_of(pdev, struct pci_dev, dev);
++ base = pci_resource_start(pci_dev, 0);
++ wed->wlan.base = (void __iomem *)ioremap(base, pci_resource_len(pci_dev, 0));
++
++ wed->wlan.pci_dev = pci_dev;
++ wed->wlan.bus_type = MTK_BUS_TYPE_PCIE;
++ wed->wlan.wpdma_int = base + MT_INT_WED_SOURCE_CSR;
++ wed->wlan.wpdma_mask = base + MT_INT_WED_MASK_CSR;
++ } else {
++ struct platform_device *plat_dev;
++ struct resource *res;
++
++ plat_dev = to_platform_device(pdev);
++ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
++ base = res->start;
++ wed->wlan.base = (void __iomem *)ioremap(base, resource_size(res));
++ wed->wlan.bus_type = MTK_BUS_TYPE_AXI;
++ wed->wlan.wpdma_int = base + MT_INT_SOURCE_CSR;
++ wed->wlan.wpdma_mask = base + MT_INT_MASK_CSR;
++ }
++ wed->wlan.wpdma_tx = base + MT_TXQ_WED_RING_BASE;
++ wed->wlan.wpdma_txfree = base + MT_RXQ_WED_RING_BASE;
++
++ wed->wlan.tx_tbit[0] = MT_WED_TX_DONE_BAND0;
++ wed->wlan.tx_tbit[1] = MT_WED_TX_DONE_BAND1;
++ wed->wlan.txfree_tbit = MT_WED_TX_FREE_DONE;
++ wed->wlan.nbuf = 7168;
++ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
++ wed->wlan.init_buf = mt7915_wed_init_buf;
++ /* disable dynamic tx token */
++ wed->wlan.offload_enable = mt7915_wed_offload_enable;
++ wed->wlan.offload_disable = mt7915_wed_offload_disable;
++
++ if (mtk_wed_device_attach(wed) != 0)
++ return 0;
++
++ if (wed->ver == MTK_WED_V1)
++ wed->wlan.wpdma_phys = base + MT_WFDMA_EXT_CSR_BASE;
++
++ *irq = wed->irq;
++ dev->mt76.dma_dev = wed->dev;
++ mdev->token_size = wed->wlan.token_start;
++ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ return 1;
++#else
++ return 0;
++#endif
++}
++
+ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+ {
+diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
+index 62350141..d7a2e594 100644
+--- a/mt7915/mt7915.h
++++ b/mt7915/mt7915.h
+@@ -522,6 +522,8 @@ static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+ {
+ }
+ #endif
++int mt7915_pci_wed_init(struct mt7915_dev *dev,
++ struct device *pdev, int *irq);
+ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+ void mt7915_wfsys_reset(struct mt7915_dev *dev);
+diff --git a/mt7915/pci.c b/mt7915/pci.c
+index d74f6097..c5da01a9 100644
+--- a/mt7915/pci.c
++++ b/mt7915/pci.c
+@@ -12,9 +12,6 @@
+ #include "mac.h"
+ #include "../trace.h"
+
+-static bool wed_enable = false;
+-module_param(wed_enable, bool, 0644);
+-
+ static LIST_HEAD(hif_list);
+ static DEFINE_SPINLOCK(hif_lock);
+ static u32 hif_idx;
+@@ -95,73 +92,6 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
+ return 0;
+ }
+
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+-static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
+-{
+- struct mt7915_dev *dev;
+- int ret;
+-
+- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+-
+- spin_lock_bh(&dev->mt76.token_lock);
+- dev->mt76.token_size = wed->wlan.token_start;
+- spin_unlock_bh(&dev->mt76.token_lock);
+-
+- ret = wait_event_timeout(dev->mt76.tx_wait,
+- !dev->mt76.wed_token_count, HZ);
+- if (!ret)
+- return -EAGAIN;
+-
+- return 0;
+-}
+-
+-static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
+-{
+- struct mt7915_dev *dev;
+-
+- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+-
+- spin_lock_bh(&dev->mt76.token_lock);
+- dev->mt76.token_size = MT7915_TOKEN_SIZE;
+- spin_unlock_bh(&dev->mt76.token_lock);
+-}
+-#endif
+-
+-static int
+-mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
+-{
+-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+- int ret;
+-
+- if (!wed_enable)
+- return 0;
+-
+- wed->wlan.pci_dev = pdev;
+- wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
+- MT_WFDMA_EXT_CSR_BASE;
+- wed->wlan.nbuf = 4096;
+- wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+- wed->wlan.init_buf = mt7915_wed_init_buf;
+- wed->wlan.offload_enable = mt7915_wed_offload_enable;
+- wed->wlan.offload_disable = mt7915_wed_offload_disable;
+-
+- if (mtk_wed_device_attach(wed) != 0)
+- return 0;
+-
+- *irq = wed->irq;
+- dev->mt76.dma_dev = wed->dev;
+-
+- ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+- if (ret)
+- return ret;
+-
+- return 1;
+-#else
+- return 0;
+-#endif
+-}
+-
+ static int mt7915_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+@@ -199,7 +129,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
+ mt7915_wfsys_reset(dev);
+ hif2 = mt7915_pci_init_hif2(pdev);
+
+- ret = mt7915_pci_wed_init(dev, pdev, &irq);
++ ret = mt7915_pci_wed_init(dev, &pdev->dev, &irq);
+ if (ret < 0)
+ goto free_wed_or_irq_vector;
+
+diff --git a/mt7915/regs.h b/mt7915/regs.h
+index 444440e1..ffda5f6b 100644
+--- a/mt7915/regs.h
++++ b/mt7915/regs.h
+@@ -623,6 +623,7 @@ enum offs_rev {
+ #define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
+ #define MT_PCIE_RECOG_ID_SEM BIT(31)
+
++#define MT_INT_WED_SOURCE_CSR MT_WFDMA_EXT_CSR(0x200)
+ #define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
+
+ #define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
+@@ -669,6 +670,13 @@ enum offs_rev {
+ #define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q)* 0x4)
+
++#define MT_TXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7300 : 0x24420)
++#define MT_RXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7410 : 0x24520)
++
++#define MT_WED_TX_DONE_BAND0 (is_mt7915(mdev)? 4 : 30)
++#define MT_WED_TX_DONE_BAND1 (is_mt7915(mdev)? 5 : 31)
++#define MT_WED_TX_FREE_DONE (is_mt7915(mdev)? 1 : 2)
++
+ #define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+ #define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+@@ -687,6 +695,11 @@ enum offs_rev {
+ #define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+ #define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
++#define MT_INT_WED_RX_DONE_BAND0_MT7916 BIT(18)
++#define MT_INT_WED_RX_DONE_BAND1_MT7916 BIT(19)
++#define MT_INT_WED_RX_DONE_WA_MAIN_MT7916 BIT(1)
++#define MT_INT_WED_RX_DONE_WA_MT7916 BIT(17)
++
+ #define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+ #define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+@@ -710,6 +723,8 @@ enum offs_rev {
+ #define MT_INT_TX_DONE_BAND0 BIT(30)
+ #define MT_INT_TX_DONE_BAND1 BIT(31)
+ #define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
++#define MT_INT_WED_TX_DONE_BAND0 BIT(4)
++#define MT_INT_WED_TX_DONE_BAND1 BIT(5)
+
+ #define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+diff --git a/mt7915/soc.c b/mt7915/soc.c
+index 3618718d..8d0b2068 100644
+--- a/mt7915/soc.c
++++ b/mt7915/soc.c
+@@ -1171,10 +1171,6 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return irq;
+-
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+@@ -1186,6 +1182,16 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
++ ret = mt7915_pci_wed_init(dev, &pdev->dev, &irq);
++ if (ret < 0)
++ goto free_device;
++
++ if (!ret) {
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++			return irq;
++ }
++
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+@@ -1207,6 +1213,8 @@ free_irq:
+ devm_free_irq(mdev->dev, irq, dev);
+
+ free_device:
++ if (mtk_wed_device_active(&mdev->mmio.wed))
++ mtk_wed_device_detach(&mdev->mmio.wed);
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+--
+2.18.0
+
diff --git a/recipes-kernel/linux-mt76/files/patches/3002-mt76-add-wed-rx-support.patch b/recipes-kernel/linux-mt76/files/patches/3002-mt76-add-wed-rx-support.patch
new file mode 100755
index 0000000..5c5a05d
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/patches/3002-mt76-add-wed-rx-support.patch
@@ -0,0 +1,1152 @@
+From 1abac441c94f3f32bd074b8b01c439263129102d Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Tue, 5 Jul 2022 19:42:55 +0800
+Subject: [PATCH 2/3] mt76: add wed rx support
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ dma.c | 219 +++++++++++++++++++++++++++++++++--------
+ dma.h | 10 ++
+ mac80211.c | 8 +-
+ mt76.h | 24 ++++-
+ mt7603/dma.c | 2 +-
+ mt7603/mt7603.h | 2 +-
+ mt7615/mac.c | 2 +-
+ mt7615/mt7615.h | 2 +-
+ mt76_connac_mcu.c | 9 ++
+ mt76x02.h | 2 +-
+ mt76x02_txrx.c | 2 +-
+ mt7915/dma.c | 10 ++
+ mt7915/mac.c | 89 ++++++++++++++++-
+ mt7915/mcu.c | 3 +
+ mt7915/mmio.c | 26 ++++-
+ mt7915/mt7915.h | 7 +-
+ mt7915/regs.h | 14 ++-
+ mt7921/mac.c | 2 +-
+ mt7921/mt7921.h | 4 +-
+ mt7921/pci_mac.c | 4 +-
+ tx.c | 34 +++++++
+ 21 files changed, 410 insertions(+), 65 deletions(-)
+ mode change 100755 => 100644 mt7915/mac.c
+ mode change 100755 => 100644 mt7915/mmio.c
+
+diff --git a/dma.c b/dma.c
+index 03ee910..094aede 100644
+--- a/dma.c
++++ b/dma.c
+@@ -98,6 +98,63 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ }
+ EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
++static struct mt76_txwi_cache *
++mt76_alloc_rxwi(struct mt76_dev *dev)
++{
++ struct mt76_txwi_cache *r;
++ int size;
++
++ size = L1_CACHE_ALIGN(sizeof(*r));
++ r = kzalloc(size, GFP_ATOMIC);
++ if (!r)
++ return NULL;
++
++ r->buf = NULL;
++
++ return r;
++}
++
++static struct mt76_txwi_cache *
++__mt76_get_rxwi(struct mt76_dev *dev)
++{
++ struct mt76_txwi_cache *r = NULL;
++
++ spin_lock(&dev->wed_lock);
++ if (!list_empty(&dev->rxwi_cache)) {
++ r = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
++ list);
++ if(r)
++		if (r)
++ }
++ spin_unlock(&dev->wed_lock);
++
++ return r;
++}
++
++struct mt76_txwi_cache *
++mt76_get_rxwi(struct mt76_dev *dev)
++{
++ struct mt76_txwi_cache *r = __mt76_get_rxwi(dev);
++
++ if (r)
++ return r;
++
++ return mt76_alloc_rxwi(dev);
++}
++EXPORT_SYMBOL_GPL(mt76_get_rxwi);
++
++void
++mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *r)
++{
++ if (!r)
++ return;
++
++ spin_lock(&dev->wed_lock);
++ list_add(&r->list, &dev->rxwi_cache);
++ spin_unlock(&dev->wed_lock);
++}
++EXPORT_SYMBOL_GPL(mt76_put_rxwi);
++
+ static void
+ mt76_free_pending_txwi(struct mt76_dev *dev)
+ {
+@@ -141,12 +198,15 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+ static int
+ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+- struct sk_buff *skb, void *txwi)
++ struct sk_buff *skb, void *txwi, void *rxwi)
+ {
++ struct mtk_wed_device *wed = &dev->mmio.wed;
++
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+ u32 ctrl;
+ int i, idx = -1;
++ int type;
+
+ if (txwi) {
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
+@@ -162,28 +222,42 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ desc = &q->desc[idx];
+ entry = &q->entry[idx];
+
+- if (buf[0].skip_unmap)
+- entry->skip_buf0 = true;
+- entry->skip_buf1 = i == nbufs - 1;
+-
+- entry->dma_addr[0] = buf[0].addr;
+- entry->dma_len[0] = buf[0].len;
+-
+- ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+- if (i < nbufs - 1) {
+- entry->dma_addr[1] = buf[1].addr;
+- entry->dma_len[1] = buf[1].len;
+- buf1 = buf[1].addr;
+- ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+- if (buf[1].skip_unmap)
+- entry->skip_buf1 = true;
++ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
++ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
++ struct mt76_txwi_cache *r = rxwi;
++ int rx_token;
++
++ if (!r)
++ return -ENOMEM;
++
++ rx_token = mt76_rx_token_consume(dev, (void *)skb, r, buf[0].addr);
++
++ buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
++ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, MTK_WED_RX_PKT_SIZE);
++ ctrl |= MT_DMA_CTL_TO_HOST;
++ } else {
++ if (buf[0].skip_unmap)
++ entry->skip_buf0 = true;
++ entry->skip_buf1 = i == nbufs - 1;
++
++ entry->dma_addr[0] = buf[0].addr;
++ entry->dma_len[0] = buf[0].len;
++
++ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
++ if (i < nbufs - 1) {
++ entry->dma_addr[1] = buf[1].addr;
++ entry->dma_len[1] = buf[1].len;
++ buf1 = buf[1].addr;
++ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
++ if (buf[1].skip_unmap)
++ entry->skip_buf1 = true;
++ }
++ if (i == nbufs - 1)
++ ctrl |= MT_DMA_CTL_LAST_SEC0;
++ else if (i == nbufs - 2)
++ ctrl |= MT_DMA_CTL_LAST_SEC1;
+ }
+
+- if (i == nbufs - 1)
+- ctrl |= MT_DMA_CTL_LAST_SEC0;
+- else if (i == nbufs - 2)
+- ctrl |= MT_DMA_CTL_LAST_SEC1;
+-
+ WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+ WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
+@@ -272,33 +346,63 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
+
+ static void *
+ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+- int *len, u32 *info, bool *more)
++ int *len, u32 *info, bool *more, bool *drop)
+ {
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_desc *desc = &q->desc[idx];
+ dma_addr_t buf_addr;
+ void *buf = e->buf;
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
++ struct mtk_wed_device *wed = &dev->mmio.wed;
++ int type;
+
+- buf_addr = e->dma_addr[0];
+ if (len) {
+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+ *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+ }
+
+- if (info)
+- *info = le32_to_cpu(desc->info);
++ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
++ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
++ u32 token;
++ struct mt76_txwi_cache *r;
++
++ token = FIELD_GET(MT_DMA_CTL_TOKEN, desc->buf1);
++
++ r = mt76_rx_token_release(dev, token);
++ if (!r)
++ return NULL;
++
++ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
++ if (!buf)
++ return NULL;
++
++ memcpy(buf, r->buf, MTK_WED_RX_PKT_SIZE);
++ buf_addr = r->dma_addr;
++ buf_len = MTK_WED_RX_PKT_SIZE;
++ r->dma_addr = 0;
++		/* r->buf = NULL; */
++
++ mt76_put_rxwi(dev, r);
++
++ if (desc->ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP))
++ *drop = true;
++ } else {
++ buf_addr = e->dma_addr[0];
++ e->buf = NULL;
++ }
+
+ dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+- e->buf = NULL;
++
++ if (info)
++ *info = le32_to_cpu(desc->info);
+
+ return buf;
+ }
+
+ static void *
+ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+- int *len, u32 *info, bool *more)
++ int *len, u32 *info, bool *more, bool *drop)
+ {
+ int idx = q->tail;
+
+@@ -314,7 +418,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+- return mt76_dma_get_buf(dev, q, idx, len, info, more);
++ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ }
+
+ static int
+@@ -336,7 +440,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ buf.len = skb->len;
+
+ spin_lock_bh(&q->lock);
+- mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
++ mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL, NULL);
+ mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
+
+@@ -413,7 +517,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ goto unmap;
+
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+- tx_info.info, tx_info.skb, t);
++ tx_info.info, tx_info.skb, t, NULL);
+
+ unmap:
+ for (n--; n > 0; n--)
+@@ -448,6 +552,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+ int frames = 0;
+ int len = SKB_WITH_OVERHEAD(q->buf_size);
+ int offset = q->buf_offset;
++ struct mtk_wed_device *wed = &dev->mmio.wed;
+
+ if (!q->ndesc)
+ return 0;
+@@ -456,10 +561,27 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+
+ while (q->queued < q->ndesc - 1) {
+ struct mt76_queue_buf qbuf;
++ int type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
++ bool skip_alloc = false;
++ struct mt76_txwi_cache *r = NULL;
++
++ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) {
++ r = mt76_get_rxwi(dev);
++ if (!r)
++ return -ENOMEM;
++
++ if (r->buf) {
++ skip_alloc = true;
++ len = MTK_WED_RX_PKT_SIZE;
++ buf = r->buf;
++ }
++ }
+
+- buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
+- if (!buf)
+- break;
++ if (!skip_alloc) {
++ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
++ if (!buf)
++ break;
++ }
+
+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
+@@ -470,7 +592,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+ qbuf.addr = addr + offset;
+ qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
+- mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
++ mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL, r);
+ frames++;
+ }
+
+@@ -516,6 +638,11 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+ if (!ret)
+ q->wed_regs = wed->txfree_ring.reg_base;
+ break;
++ case MT76_WED_Q_RX:
++ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
++ if (!ret)
++ q->wed_regs = wed->rx_ring[ring].reg_base;
++ break;
+ default:
+ ret = -EINVAL;
+ }
+@@ -531,7 +658,8 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
+ {
+- int ret, size;
++ int ret, size, type;
++ struct mtk_wed_device *wed = &dev->mmio.wed;
+
+ spin_lock_init(&q->lock);
+ spin_lock_init(&q->cleanup_lock);
+@@ -541,6 +669,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
+
++ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
++ if (mtk_wed_device_active(wed) && type == MT76_WED_Q_RX)
++ q->buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + MTK_WED_RX_PKT_SIZE) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+@@ -573,7 +706,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+
+ spin_lock_bh(&q->lock);
+ do {
+- buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
++ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
+ if (!buf)
+ break;
+
+@@ -614,7 +747,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+
+ static void
+ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+- int len, bool more)
++ int len, bool more, u32 info)
+ {
+ struct sk_buff *skb = q->rx_head;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+@@ -634,7 +767,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+
+ q->rx_head = NULL;
+ if (nr_frags < ARRAY_SIZE(shinfo->frags))
+- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
++ dev->drv->rx_skb(dev, q - dev->q_rx, skb, info);
+ else
+ dev_kfree_skb(skb);
+ }
+@@ -655,6 +788,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ }
+
+ while (done < budget) {
++ bool drop = false;
+ u32 info;
+
+ if (check_ddone) {
+@@ -665,10 +799,13 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ break;
+ }
+
+- data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
++ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, &drop);
+ if (!data)
+ break;
+
++ if (drop)
++ goto free_frag;
++
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+@@ -681,7 +818,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ }
+
+ if (q->rx_head) {
+- mt76_add_fragment(dev, q, data, len, more);
++ mt76_add_fragment(dev, q, data, len, more, info);
+ continue;
+ }
+
+@@ -708,7 +845,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ continue;
+ }
+
+- dev->drv->rx_skb(dev, q - dev->q_rx, skb);
++ dev->drv->rx_skb(dev, q - dev->q_rx, skb, info);
+ continue;
+
+ free_frag:
+diff --git a/dma.h b/dma.h
+index fdf786f..90370d1 100644
+--- a/dma.h
++++ b/dma.h
+@@ -16,6 +16,16 @@
+ #define MT_DMA_CTL_LAST_SEC0 BIT(30)
+ #define MT_DMA_CTL_DMA_DONE BIT(31)
+
++#define MT_DMA_CTL_TO_HOST BIT(8)
++#define MT_DMA_CTL_TO_HOST_A BIT(12)
++#define MT_DMA_CTL_DROP BIT(14)
++
++#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
++
++#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
++#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
++#define MT_DMA_INFO_PPE_VLD BIT(31)
++
+ #define MT_DMA_HDR_LEN 4
+ #define MT_RX_INFO_LEN 4
+ #define MT_FCE_INFO_LEN 4
+diff --git a/mac80211.c b/mac80211.c
+index af2c09a..fa5ce6e 100644
+--- a/mac80211.c
++++ b/mac80211.c
+@@ -594,11 +594,14 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ spin_lock_init(&dev->token_lock);
++ spin_lock_init(&dev->rx_token_lock);
+ idr_init(&dev->token);
++ idr_init(&dev->rx_token);
+
+ INIT_LIST_HEAD(&dev->wcid_list);
+
+ INIT_LIST_HEAD(&dev->txwi_cache);
++ INIT_LIST_HEAD(&dev->rxwi_cache);
+ dev->token_size = dev->drv->token_size;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+@@ -1296,7 +1299,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+
+ while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
+ mt76_check_sta(dev, skb);
+- mt76_rx_aggr_reorder(skb, &frames);
++ if (mtk_wed_device_active(&dev->mmio.wed))
++ __skb_queue_tail(&frames, skb);
++ else
++ mt76_rx_aggr_reorder(skb, &frames);
+ }
+
+ mt76_rx_complete(dev, &frames, napi);
+diff --git a/mt76.h b/mt76.h
+index 4c8a671..24e4741 100644
+--- a/mt76.h
++++ b/mt76.h
+@@ -20,6 +20,8 @@
+
+ #define MT_MCU_RING_SIZE 32
+ #define MT_RX_BUF_SIZE 2048
++#define MTK_WED_RX_PKT_SIZE 1700
++
+ #define MT_SKB_HEAD_LEN 256
+
+ #define MT_MAX_NON_AQL_PKT 16
+@@ -35,6 +37,7 @@
+ FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+ FIELD_PREP(MT_QFLAG_WED_RING, _n))
+ #define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
++#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
+ #define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+
+ struct mt76_dev;
+@@ -56,6 +59,7 @@ enum mt76_bus_type {
+ enum mt76_wed_type {
+ MT76_WED_Q_TX,
+ MT76_WED_Q_TXFREE,
++ MT76_WED_Q_RX,
+ };
+
+ struct mt76_bus_ops {
+@@ -305,7 +309,10 @@ struct mt76_txwi_cache {
+ struct list_head list;
+ dma_addr_t dma_addr;
+
+- struct sk_buff *skb;
++ union {
++ void *buf;
++ struct sk_buff *skb;
++ };
+ };
+
+ struct mt76_rx_tid {
+@@ -403,7 +410,7 @@ struct mt76_driver_ops {
+ bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
+
+ void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+
+ void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+@@ -747,6 +754,7 @@ struct mt76_dev {
+ struct ieee80211_hw *hw;
+
+ spinlock_t lock;
++ spinlock_t wed_lock;
+ spinlock_t cc_lock;
+
+ u32 cur_cc_bss_rx;
+@@ -772,6 +780,7 @@ struct mt76_dev {
+ struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+ struct list_head txwi_cache;
++ struct list_head rxwi_cache;
+ struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
+ struct mt76_queue q_rx[__MT_RXQ_MAX];
+ const struct mt76_queue_ops *queue_ops;
+@@ -785,6 +794,9 @@ struct mt76_dev {
+ u16 wed_token_count;
+ u16 token_count;
+ u16 token_size;
++ u16 rx_token_size;
++ spinlock_t rx_token_lock;
++ struct idr rx_token;
+
+ wait_queue_head_t tx_wait;
+ /* spinclock used to protect wcid pktid linked list */
+@@ -1351,6 +1363,8 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+ }
+
+ void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
++void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
++struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ struct napi_struct *napi);
+ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+@@ -1495,6 +1509,12 @@ struct mt76_txwi_cache *
+ mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
+ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
+ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
++int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
++ struct mt76_txwi_cache *r, dma_addr_t phys);
++void skb_trace(const struct sk_buff *skb, bool full_pkt);
++
++struct mt76_txwi_cache *
++mt76_rx_token_release(struct mt76_dev *dev, int token);
+
+ static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
+ {
+diff --git a/mt7603/dma.c b/mt7603/dma.c
+index 590cff9..2ff71c5 100644
+--- a/mt7603/dma.c
++++ b/mt7603/dma.c
+@@ -69,7 +69,7 @@ free:
+ }
+
+ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+diff --git a/mt7603/mt7603.h b/mt7603/mt7603.h
+index 0fd46d9..f2ce22a 100644
+--- a/mt7603/mt7603.h
++++ b/mt7603/mt7603.h
+@@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+
+ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+ void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+ int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+diff --git a/mt7615/mac.c b/mt7615/mac.c
+index 3728627..14cdd9a 100644
+--- a/mt7615/mac.c
++++ b/mt7615/mac.c
+@@ -1648,7 +1648,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+ EXPORT_SYMBOL_GPL(mt7615_rx_check);
+
+ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+diff --git a/mt7615/mt7615.h b/mt7615/mt7615.h
+index 25880d1..983469c 100644
+--- a/mt7615/mt7615.h
++++ b/mt7615/mt7615.h
+@@ -511,7 +511,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
+ void mt7615_tx_token_put(struct mt7615_dev *dev);
+ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
+ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
+index cd35068..f90a08f 100644
+--- a/mt76_connac_mcu.c
++++ b/mt76_connac_mcu.c
+@@ -1190,6 +1190,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ int cmd, bool enable, bool tx)
+ {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
++ struct mtk_wed_device *wed = &dev->mmio.wed;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+@@ -1210,6 +1211,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
+ wtbl_hdr);
+
++ if (mtk_wed_device_active(wed) && wed->ver > MTK_WED_V1)
++ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
+ ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+ if (ret)
+ return ret;
+@@ -1220,6 +1223,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+
+ mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
+
++ if (mtk_wed_device_active(wed) && wed->ver > MTK_WED_V1)
++ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+ }
+ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
+@@ -2634,6 +2639,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+ {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct mtk_wed_device *wed = &dev->mmio.wed;
+ struct sk_buff *skb;
+ int ret;
+
+@@ -2645,6 +2651,9 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ if (ret)
+ return ret;
+
++ if (mtk_wed_device_active(wed) && wed->ver > MTK_WED_V1)
++ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
++
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+ }
+ EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
+diff --git a/mt76x02.h b/mt76x02.h
+index f76fd22..0b872af 100644
+--- a/mt76x02.h
++++ b/mt76x02.h
+@@ -173,7 +173,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
+ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
+ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+diff --git a/mt76x02_txrx.c b/mt76x02_txrx.c
+index 96fdf42..bf24d3e 100644
+--- a/mt76x02_txrx.c
++++ b/mt76x02_txrx.c
+@@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ void *rxwi = skb->data;
+diff --git a/mt7915/dma.c b/mt7915/dma.c
+index 7122322..ac98e01 100644
+--- a/mt7915/dma.c
++++ b/mt7915/dma.c
+@@ -376,6 +376,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
++ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
++ MT_WFDMA0_EXT0_RXWB_KEEP);
+ } else {
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+@@ -451,6 +453,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+
+ /* rx data queue for band0 */
+ if (!dev->phy.band_idx) {
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
++ dev->mt76.mmio.wed.ver > MTK_WED_V1)
++ dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(MT7915_RXQ_BAND0);
++
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+@@ -482,6 +488,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+
+ if (dev->dbdc_support || dev->phy.band_idx) {
+ /* rx data queue for band1 */
++ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
++ dev->mt76.mmio.wed.ver > MTK_WED_V1)
++ dev->mt76.q_rx[MT_RXQ_EXT].flags = MT_WED_Q_RX(MT7915_RXQ_BAND1);
++
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
+ MT_RXQ_ID(MT_RXQ_EXT),
+ MT7915_RX_RING_SIZE,
+diff --git a/mt7915/mac.c b/mt7915/mac.c
+old mode 100755
+new mode 100644
+index bc8da4d..79b7d01
+--- a/mt7915/mac.c
++++ b/mt7915/mac.c
+@@ -217,7 +217,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+ }
+
+ static int
+-mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
++mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, enum mt76_rxq_id q, u32 info)
+ {
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+@@ -494,6 +494,27 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+ #endif
+ } else {
+ status->flag |= RX_FLAG_8023;
++		if (msta && msta->vif) {
++ struct mtk_wed_device *wed;
++ int type;
++
++ wed = &dev->mt76.mmio.wed;
++ type = FIELD_GET(MT_QFLAG_WED_TYPE, dev->mt76.q_rx[q].flags);
++ if ((mtk_wed_device_active(wed) && type == MT76_WED_Q_RX) &&
++ (info & MT_DMA_INFO_PPE_VLD)){
++ struct ieee80211_vif *vif;
++ u32 hash, reason;
++
++ vif = container_of((void *)msta->vif, struct ieee80211_vif,
++ drv_priv);
++
++ skb->dev = ieee80211_vif_to_netdev(vif);
++ reason = FIELD_GET(MT_DMA_PPE_CPU_REASON, info);
++ hash = FIELD_GET(MT_DMA_PPE_ENTRY, info);
++
++ mtk_wed_device_ppe_check(wed, skb, reason, hash);
++ }
++ }
+ }
+
+ if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+@@ -840,6 +861,68 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+ return MT_TXD_TXP_BUF_SIZE;
+ }
+
++u32
++mt7915_wed_init_rx_buf(struct mtk_wed_device *wed, int pkt_num)
++{
++ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
++ struct mt7915_dev *dev;
++ dma_addr_t buf_phys;
++ void *buf;
++ int i, token, buf_size;
++
++ buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_pkt_size) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++
++ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
++ for (i = 0; i < pkt_num; i++) {
++ struct mt76_txwi_cache *r = mt76_get_rxwi(&dev->mt76);
++
++ buf = page_frag_alloc(&wed->rx_page, buf_size, GFP_ATOMIC);
++ if (!buf)
++ return -ENOMEM;
++
++ buf_phys = dma_map_single(dev->mt76.dma_dev, buf, wed->wlan.rx_pkt_size,
++ DMA_TO_DEVICE);
++
++		if (unlikely(dma_mapping_error(dev->mt76.dma_dev, buf_phys))) {
++ skb_free_frag(buf);
++ break;
++ }
++
++ desc->buf0 = buf_phys;
++
++ token = mt76_rx_token_consume(&dev->mt76, buf, r, buf_phys);
++
++ desc->token |= FIELD_PREP(MT_DMA_CTL_TOKEN, token);
++ desc++;
++ }
++
++ return 0;
++}
++
++void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed)
++{
++ struct mt76_txwi_cache *rxwi;
++ struct mt7915_dev *dev;
++ int token;
++
++ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
++
++	for (token = 0; token < dev->mt76.rx_token_size; token++) {
++ rxwi = mt76_rx_token_release(&dev->mt76, token);
++		if (!rxwi)
++ continue;
++
++ dma_unmap_single(dev->mt76.dma_dev, rxwi->dma_addr,
++ wed->wlan.rx_pkt_size, DMA_FROM_DEVICE);
++ skb_free_frag(rxwi->buf);
++ rxwi->buf = NULL;
++
++ mt76_put_rxwi(&dev->mt76, rxwi);
++ }
++ return;
++}
++
+ static void
+ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+ {
+@@ -1120,7 +1203,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
+ }
+
+ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+@@ -1154,7 +1237,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_NORMAL:
+- if (!mt7915_mac_fill_rx(dev, skb)) {
++ if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+diff --git a/mt7915/mcu.c b/mt7915/mcu.c
+index 1468c3c..4f64df4 100644
+--- a/mt7915/mcu.c
++++ b/mt7915/mcu.c
+@@ -1704,6 +1704,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+ {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
++ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mt7915_sta *msta;
+ struct sk_buff *skb;
+ int ret;
+@@ -1756,6 +1757,8 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ return ret;
+ }
+ out:
++ if (mtk_wed_device_active(wed) && wed->ver > MTK_WED_V1)
++ mtk_wed_device_update_msg(wed, WED_WO_STA_REC, skb->data, skb->len);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+ }
+diff --git a/mt7915/mmio.c b/mt7915/mmio.c
+old mode 100755
+new mode 100644
+index b4a3120..08ff556
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -28,6 +28,9 @@ static const u32 mt7915_reg[] = {
+ [FW_EXCEPTION_ADDR] = 0x219848,
+ [SWDEF_BASE_ADDR] = 0x41f200,
+ [EXCEPTION_BASE_ADDR] = 0x219848,
++ [WED_TX_RING] = 0xd7300,
++ [WED_RX_RING] = 0xd7410,
++ [WED_RX_DATA_RING] = 0xd4500,
+ };
+
+ static const u32 mt7916_reg[] = {
+@@ -45,6 +48,9 @@ static const u32 mt7916_reg[] = {
+ [FW_EXCEPTION_ADDR] = 0x022050bc,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [EXCEPTION_BASE_ADDR] = 0x022050BC,
++ [WED_TX_RING] = 0xd7300,
++ [WED_RX_RING] = 0xd7410,
++ [WED_RX_DATA_RING] = 0xd4540,
+ };
+
+ static const u32 mt7986_reg[] = {
+@@ -62,6 +68,9 @@ static const u32 mt7986_reg[] = {
+ [FW_EXCEPTION_ADDR] = 0x02204ffc,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [EXCEPTION_BASE_ADDR] = 0x02204FFC,
++ [WED_TX_RING] = 0x24420,
++ [WED_RX_RING] = 0x24520,
++ [WED_RX_DATA_RING] = 0x24540,
+ };
+
+ static const u32 mt7915_offs[] = {
+@@ -710,6 +719,7 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
+ wed->wlan.bus_type = MTK_BUS_TYPE_PCIE;
+ wed->wlan.wpdma_int = base + MT_INT_WED_SOURCE_CSR;
+ wed->wlan.wpdma_mask = base + MT_INT_WED_MASK_CSR;
++ wed->wlan.wpdma_phys = base + MT_WFDMA_EXT_CSR_BASE;
+ } else {
+ struct platform_device *plat_dev;
+ struct resource *res;
+@@ -722,12 +732,19 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
+ wed->wlan.wpdma_int = base + MT_INT_SOURCE_CSR;
+ wed->wlan.wpdma_mask = base + MT_INT_MASK_CSR;
+ }
++ wed->wlan.rx_pkt = MT7915_WED_RX_TOKEN_SIZE;
++ wed->wlan.phy_base = base;
+ wed->wlan.wpdma_tx = base + MT_TXQ_WED_RING_BASE;
+ wed->wlan.wpdma_txfree = base + MT_RXQ_WED_RING_BASE;
++ wed->wlan.wpdma_rx_glo = base + MT_WPDMA_GLO_CFG;
++ wed->wlan.wpdma_rx = base + MT_RXQ_WED_DATA_RING_BASE;
+
+ wed->wlan.tx_tbit[0] = MT_WED_TX_DONE_BAND0;
+ wed->wlan.tx_tbit[1] = MT_WED_TX_DONE_BAND1;
+ wed->wlan.txfree_tbit = MT_WED_TX_FREE_DONE;
++ wed->wlan.rx_tbit[0] = MT_WED_RX_DONE_BAND0;
++ wed->wlan.rx_tbit[1] = MT_WED_RX_DONE_BAND1;
++
+ wed->wlan.nbuf = 7168;
+ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.init_buf = mt7915_wed_init_buf;
+@@ -735,12 +752,15 @@ mt7915_pci_wed_init(struct mt7915_dev *dev, struct device *pdev, int *irq)
+ wed->wlan.offload_enable = mt7915_wed_offload_enable;
+ wed->wlan.offload_disable = mt7915_wed_offload_disable;
+
++ wed->wlan.rx_nbuf = 65536;
++ wed->wlan.rx_pkt_size = MTK_WED_RX_PKT_SIZE;
++ wed->wlan.init_rx_buf = mt7915_wed_init_rx_buf;
++ wed->wlan.release_rx_buf = mt7915_wed_release_rx_buf;
++
++ dev->mt76.rx_token_size = wed->wlan.rx_pkt + MT7915_RX_RING_SIZE * 2;
+ if (mtk_wed_device_attach(wed) != 0)
+ return 0;
+
+- if (wed->ver == MTK_WED_V1)
+- wed->wlan.wpdma_phys = base + MT_WFDMA_EXT_CSR_BASE;
+-
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+ mdev->token_size = wed->wlan.token_start;
+diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
+index fe407c5..e2f0d41 100644
+--- a/mt7915/mt7915.h
++++ b/mt7915/mt7915.h
+@@ -69,6 +69,7 @@
+ #define MT7915_MAX_STA_TWT_AGRT 8
+ #define MT7915_MIN_TWT_DUR 64
+ #define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
++#define MT7915_WED_RX_TOKEN_SIZE 12288
+
+ struct mt7915_vif;
+ struct mt7915_sta;
+@@ -531,7 +532,9 @@ void mt7915_wfsys_reset(struct mt7915_dev *dev);
+ irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
+ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
+ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+-
++u32 mt7915_wed_init_rx_buf(struct mtk_wed_device *wed,
++ int pkt_num);
++void mt7915_wed_release_rx_buf(struct mtk_wed_device *wed);
+ int mt7915_register_device(struct mt7915_dev *dev);
+ void mt7915_unregister_device(struct mt7915_dev *dev);
+ int mt7915_eeprom_init(struct mt7915_dev *dev);
+@@ -683,7 +686,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ struct mt76_tx_info *tx_info);
+ void mt7915_tx_token_put(struct mt7915_dev *dev);
+ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
+ void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+ void mt7915_stats_work(struct work_struct *work);
+diff --git a/mt7915/regs.h b/mt7915/regs.h
+index ffda5f6..08bf84c 100644
+--- a/mt7915/regs.h
++++ b/mt7915/regs.h
+@@ -33,6 +33,9 @@ enum reg_rev {
+ FW_EXCEPTION_ADDR,
+ SWDEF_BASE_ADDR,
+ EXCEPTION_BASE_ADDR,
++ WED_TX_RING,
++ WED_RX_RING,
++ WED_RX_DATA_RING,
+ __MT_REG_MAX,
+ };
+
+@@ -570,9 +573,13 @@ enum offs_rev {
+ #define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+
+ #define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
++#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
++#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
++
+ #define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+ #define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+ #define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
++#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
+
+ #define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
+ #define MT_WFDMA0_MT_WA_WDT_INT BIT(31)
+@@ -670,12 +677,15 @@ enum offs_rev {
+ #define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q)* 0x4)
+
+-#define MT_TXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7300 : 0x24420)
+-#define MT_RXQ_WED_RING_BASE (!is_mt7986(mdev)? 0xd7410 : 0x24520)
++#define MT_TXQ_WED_RING_BASE __REG(WED_TX_RING)
++#define MT_RXQ_WED_RING_BASE __REG(WED_RX_RING)
++#define MT_RXQ_WED_DATA_RING_BASE __REG(WED_RX_DATA_RING)
+
+ #define MT_WED_TX_DONE_BAND0 (is_mt7915(mdev)? 4 : 30)
+ #define MT_WED_TX_DONE_BAND1 (is_mt7915(mdev)? 5 : 31)
+ #define MT_WED_TX_FREE_DONE (is_mt7915(mdev)? 1 : 2)
++#define MT_WED_RX_DONE_BAND0 (is_mt7915(mdev)? 16 : 22)
++#define MT_WED_RX_DONE_BAND1 (is_mt7915(mdev)? 17 : 23)
+
+ #define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+ #define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+diff --git a/mt7921/mac.c b/mt7921/mac.c
+index 4fcadf8..4897940 100644
+--- a/mt7921/mac.c
++++ b/mt7921/mac.c
+@@ -555,7 +555,7 @@ out:
+ EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
+
+ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+diff --git a/mt7921/mt7921.h b/mt7921/mt7921.h
+index efeb82c..4b2e974 100644
+--- a/mt7921/mt7921.h
++++ b/mt7921/mt7921.h
+@@ -388,7 +388,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ void mt7921_tx_worker(struct mt76_worker *w);
+ void mt7921_tx_token_put(struct mt7921_dev *dev);
+ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+ void mt7921_stats_work(struct work_struct *work);
+ void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
+@@ -424,7 +424,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+
+ bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
+ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb);
++ struct sk_buff *skb, u32 info);
+ int mt7921e_driver_own(struct mt7921_dev *dev);
+ int mt7921e_mac_reset(struct mt7921_dev *dev);
+ int mt7921e_mcu_init(struct mt7921_dev *dev);
+diff --git a/mt7921/pci_mac.c b/mt7921/pci_mac.c
+index e180067..ca982eb 100644
+--- a/mt7921/pci_mac.c
++++ b/mt7921/pci_mac.c
+@@ -182,7 +182,7 @@ bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
+ }
+
+ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 info)
+ {
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+@@ -196,7 +196,7 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ napi_consume_skb(skb, 1);
+ break;
+ default:
+- mt7921_queue_rx_skb(mdev, q, skb);
++ mt7921_queue_rx_skb(mdev, q, skb, info);
+ break;
+ }
+ }
+diff --git a/tx.c b/tx.c
+index ae44afe..bccd206 100644
+--- a/tx.c
++++ b/tx.c
+@@ -767,3 +767,37 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
+ return txwi;
+ }
+ EXPORT_SYMBOL_GPL(mt76_token_release);
++
++int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
++ struct mt76_txwi_cache *r, dma_addr_t phys)
++{
++ int token;
++
++ spin_lock_bh(&dev->rx_token_lock);
++
++ token = idr_alloc(&dev->rx_token, r, 0, dev->rx_token_size, GFP_ATOMIC);
++
++ spin_unlock_bh(&dev->rx_token_lock);
++
++ r->buf = ptr;
++ r->dma_addr = phys;
++
++ return token;
++}
++EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
++
++struct mt76_txwi_cache *
++mt76_rx_token_release(struct mt76_dev *dev, int token)
++{
++
++ struct mt76_txwi_cache *rxwi;
++
++ spin_lock_bh(&dev->rx_token_lock);
++
++ rxwi = idr_remove(&dev->rx_token, token);
++
++ spin_unlock_bh(&dev->rx_token_lock);
++
++ return rxwi;
++}
++EXPORT_SYMBOL_GPL(mt76_rx_token_release);
+--
+2.18.0
+
diff --git a/recipes-kernel/linux-mt76/files/patches/3003-mt76-add-fill-receive-path-to-report-wed-idx.patch b/recipes-kernel/linux-mt76/files/patches/3003-mt76-add-fill-receive-path-to-report-wed-idx.patch
new file mode 100755
index 0000000..3e3d8be
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/patches/3003-mt76-add-fill-receive-path-to-report-wed-idx.patch
@@ -0,0 +1,49 @@
+From bad890a89e289efc57091c0c08bbfad701147e4e Mon Sep 17 00:00:00 2001
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Thu, 19 May 2022 13:44:42 +0800
+Subject: [PATCH 3/3] mt76: add fill receive path to report wed idx
+
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+---
+ mt7915/main.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/mt7915/main.c b/mt7915/main.c
+index f1396eed..a0798d46 100644
+--- a/mt7915/main.c
++++ b/mt7915/main.c
+@@ -1458,6 +1458,24 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+
+ return 0;
+ }
++
++static int
++mt7915_net_fill_receive_path(struct ieee80211_hw *hw,
++ struct net_device_path_ctx *ctx,
++ struct net_device_path *path)
++{
++ struct mt7915_dev *dev = mt7915_hw_dev(hw);
++ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
++
++ if (!mtk_wed_device_active(wed))
++ return -ENODEV;
++
++ path->dev = ctx->dev;
++ path->mtk_wdma.wdma_idx = wed->wdma_idx;
++
++ return 0;
++}
++
+ #endif
+
+ const struct ieee80211_ops mt7915_ops = {
+@@ -1509,5 +1527,6 @@ const struct ieee80211_ops mt7915_ops = {
+ .set_radar_background = mt7915_set_radar_background,
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7915_net_fill_forward_path,
++ .net_fill_receive_path = mt7915_net_fill_receive_path,
+ #endif
+ };
+--
+2.18.0
+
diff --git a/recipes-kernel/linux-mt76/files/patches/patches.inc b/recipes-kernel/linux-mt76/files/patches/patches.inc
index 868ea02..da26649 100644
--- a/recipes-kernel/linux-mt76/files/patches/patches.inc
+++ b/recipes-kernel/linux-mt76/files/patches/patches.inc
@@ -21,5 +21,8 @@
file://1113-mt76-mt7915-drop-packets-when-TWT-stations-use-more-.patch \
file://1114-mt76-airtime-fairness-feature-off-in-mac80211.patch \
file://1115-mt76-mt7915-add-mt7986-and-mt7916-pre-calibration.patch \
- file://3000-mt76-remove-WED-support-patch-for-build-err.patch \
+ file://1116-mt76-mt7915-add-vendor-dump-phy-capa.patch \
+ file://3001-mt76-add-wed-tx-support.patch \
+ file://3002-mt76-add-wed-rx-support.patch \
+ file://3003-mt76-add-fill-receive-path-to-report-wed-idx.patch \
"
diff --git a/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_0.bin b/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_0.bin
new file mode 100644
index 0000000..dcdca1d
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_0.bin
Binary files differ
diff --git a/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_1.bin b/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_1.bin
new file mode 100644
index 0000000..3c2a458
--- /dev/null
+++ b/recipes-kernel/linux-mt76/files/src/firmware/mt7986_wo_1.bin
Binary files differ
diff --git a/recipes-kernel/linux-mt76/linux-mt76.bb b/recipes-kernel/linux-mt76/linux-mt76.bb
index 7b96802..6b4c88c 100644
--- a/recipes-kernel/linux-mt76/linux-mt76.bb
+++ b/recipes-kernel/linux-mt76/linux-mt76.bb
@@ -7,7 +7,7 @@
PV = "1.0"
-SRCREV ?= "b6e865e2cc7080c91ec34a9dd3648d25f7ce04c6"
+SRCREV ?= "93e3fce916c62d06892d41bf00d0f4c2926c9a0b"
SRC_URI = " \
git://git@github.com/openwrt/mt76.git;protocol=https \
file://COPYING;subdir=git \
@@ -96,6 +96,8 @@
install -m 644 ${WORKDIR}/src/firmware/mt7986_eeprom_mt7976_dbdc.bin ${D}${base_libdir}/firmware/mediatek/
install -m 644 ${WORKDIR}/src/firmware/mt7986_eeprom_mt7976.bin ${D}${base_libdir}/firmware/mediatek/
install -m 644 ${WORKDIR}/src/firmware/mt7986_eeprom_mt7976_dual.bin ${D}${base_libdir}/firmware/mediatek/
+ install -m 644 ${WORKDIR}/src/firmware/mt7986_wo_0.bin ${D}${base_libdir}/firmware/mediatek/
+ install -m 644 ${WORKDIR}/src/firmware/mt7986_wo_1.bin ${D}${base_libdir}/firmware/mediatek/
}
FILES_${PN} += " \
@@ -113,6 +115,8 @@
${base_libdir}/firmware/mediatek/mt7986_eeprom_mt7976_dbdc.bin \
${base_libdir}/firmware/mediatek/mt7986_eeprom_mt7976.bin \
${base_libdir}/firmware/mediatek/mt7986_eeprom_mt7976_dual.bin \
+ ${base_libdir}/firmware/mediatek/mt7986_wo_0.bin \
+ ${base_libdir}/firmware/mediatek/mt7986_wo_1.bin \
"
# Make linux-mt76 depend on all of the split-out packages.
python populate_packages_prepend () {