From 947b0a84d32ca9fbdfc5befc133da50c1349694e Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1112/1133] mt76: testmode: additional support
Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 ++++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   39 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   10 +-
 mt7915/mcu.h      |   28 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   14 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1172 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++++++++
 testmode.c        |  275 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   80 ++++
 tx.c              |    3 +-
 19 files changed, 1964 insertions(+), 148 deletions(-)
diff --git a/dma.c b/dma.c
index e3fa4f39..a6bb3730 100644
--- a/dma.c
+++ b/dma.c
@@ -566,8 +566,7 @@ free:
if (mt76_is_testmode_skb(dev, skb, &hw)) {
struct mt76_phy *phy = hw->priv;
- if (tx_info.skb == phy->test.tx_skb)
- phy->test.tx_done--;
+ phy->test.tx_done--;
}
#endif
diff --git a/mac80211.c b/mac80211.c
index 00e7b4f3..2a66b1dd 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -56,6 +56,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
CHAN5G(60, 5300),
CHAN5G(64, 5320),
+ CHAN5G(68, 5340),
+ CHAN5G(80, 5400),
+ CHAN5G(84, 5420),
+ CHAN5G(88, 5440),
+ CHAN5G(92, 5460),
+ CHAN5G(96, 5480),
+
CHAN5G(100, 5500),
CHAN5G(104, 5520),
CHAN5G(108, 5540),
@@ -76,6 +83,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
CHAN5G(165, 5825),
CHAN5G(169, 5845),
CHAN5G(173, 5865),
+
+ CHAN5G(184, 4920),
+ CHAN5G(188, 4940),
+ CHAN5G(192, 4960),
+ CHAN5G(196, 4980),
};
static const struct ieee80211_channel mt76_channels_6ghz[] = {
diff --git a/mt76.h b/mt76.h
index 25ad0f6b..cde52268 100644
--- a/mt76.h
+++ b/mt76.h
@@ -641,6 +641,21 @@ struct mt76_testmode_ops {
int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
enum mt76_testmode_state new_state);
int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
+};
+
+struct mt76_testmode_entry_data {
+ struct sk_buff *tx_skb;
+
+ u16 tx_mpdu_len;
+ u8 tx_rate_idx;
+ u8 tx_rate_nss;
+ u8 tx_rate_ldpc;
+
+ u8 addr[3][ETH_ALEN];
+ u8 aid;
+ u8 ru_alloc;
+ u8 ru_idx;
};
#define MT_TM_FW_RX_COUNT BIT(0)
@@ -649,16 +664,11 @@ struct mt76_testmode_data {
enum mt76_testmode_state state;
u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
- struct sk_buff *tx_skb;
u32 tx_count;
- u16 tx_mpdu_len;
u8 tx_rate_mode;
- u8 tx_rate_idx;
- u8 tx_rate_nss;
u8 tx_rate_sgi;
- u8 tx_rate_ldpc;
u8 tx_rate_stbc;
u8 tx_ltf;
@@ -674,10 +684,37 @@ struct mt76_testmode_data {
u8 tx_power[4];
u8 tx_power_control;
- u8 addr[3][ETH_ALEN];
+ struct list_head tm_entry_list;
+ struct mt76_wcid *cur_entry;
+ u8 entry_num;
+ union {
+ struct mt76_testmode_entry_data ed;
+ struct {
+ /* must be the same as mt76_testmode_entry_data */
+ struct sk_buff *tx_skb;
+
+ u16 tx_mpdu_len;
+ u8 tx_rate_idx;
+ u8 tx_rate_nss;
+ u8 tx_rate_ldpc;
+
+ u8 addr[3][ETH_ALEN];
+ u8 aid;
+ u8 ru_alloc;
+ u8 ru_idx;
+ };
+ };
u8 flag;
+ struct {
+ u8 type;
+ u8 enable;
+ } cfg;
+
+ u8 txbf_act;
+ u16 txbf_param[8];
+
u32 tx_pending;
u32 tx_queued;
u16 tx_queued_limit;
@@ -1141,6 +1178,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
#endif
}
+#ifdef CONFIG_NL80211_TESTMODE
+static inline struct mt76_wcid *
+mt76_testmode_first_entry(struct mt76_phy *phy)
+{
+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
+ return &phy->dev->global_wcid;
+
+ return list_first_entry(&phy->test.tm_entry_list,
+ typeof(struct mt76_wcid),
+ list);
+}
+
+static inline struct mt76_testmode_entry_data *
+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
+{
+ if (!wcid)
+ return NULL;
+ if (wcid == &phy->dev->global_wcid)
+ return &phy->test.ed;
+
+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
+ phy->hw->sta_data_size);
+}
+
+#define mt76_tm_for_each_entry(phy, wcid, ed) \
+ for (wcid = mt76_testmode_first_entry(phy), \
+ ed = mt76_testmode_entry_data(phy, wcid); \
+ ((phy->test.aid && \
+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
+ wcid = list_next_entry(wcid, list), \
+ ed = mt76_testmode_entry_data(phy, wcid))
+#endif
+
+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
+ struct mt76_wcid *wcid;
+
+ if (skb == ed->tx_skb)
+ return true;
+
+ mt76_tm_for_each_entry(phy, wcid, ed)
+ if (skb == ed->tx_skb)
+ return true;
+ return false;
+#else
+ return false;
+#endif
+}
+
static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_hw **hw)
@@ -1151,7 +1241,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
struct mt76_phy *phy = dev->phys[i];
- if (phy && skb == phy->test.tx_skb) {
+ if (phy && mt76_testmode_enabled(phy) &&
+ __mt76_is_testmode_skb(phy, skb)) {
*hw = dev->phys[i]->hw;
return true;
}
@@ -1253,7 +1344,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index 2fefac68..b6c2ccf0 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -394,6 +394,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MONITOR:
if (vif->p2p)
conn_type = CONNECTION_P2P_GC;
else
@@ -575,6 +576,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
rx->rca2 = 1;
rx->rv = 1;
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ rx->rca1 = 0;
+
if (!is_connac_v1(dev))
return;
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index 1a146563..f616bcea 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -999,6 +999,7 @@ enum {
MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
MCU_EXT_EVENT_RDD_REPORT = 0x3a,
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
@@ -1200,6 +1201,7 @@ enum {
MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
/* for vendor csi and air monitor */
MCU_EXT_CMD_SMESH_CTRL = 0xae,
+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
MCU_EXT_CMD_CERT_CFG = 0xb7,
MCU_EXT_CMD_CSI_CTRL = 0xc2,
};
diff --git a/mt7915/init.c b/mt7915/init.c
index f73d34a9..19447ad1 100644
--- a/mt7915/init.c
+++ b/mt7915/init.c
@@ -681,7 +681,7 @@ static void mt7915_init_work(struct work_struct *work)
struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
init_work);
- mt7915_mcu_set_eeprom(dev);
+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
mt7915_mac_init(dev);
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
diff --git a/mt7915/mac.c b/mt7915/mac.c
index de2bdba5..1460a32b 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -627,16 +627,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
{
#ifdef CONFIG_NL80211_TESTMODE
struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
const struct ieee80211_rate *r;
- u8 bw, mode, nss = td->tx_rate_nss;
- u8 rate_idx = td->tx_rate_idx;
+ u8 bw, mode, nss, rate_idx, ldpc;
u16 rateval = 0;
u32 val;
bool cck = false;
int band;
- if (skb != phy->mt76->test.tx_skb)
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
+ phy->test.spe_idx));
+
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
+ txwi[1] |= cpu_to_le32(BIT(18));
+ txwi[2] = 0;
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
+
return;
+ }
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
+ if (ed->tx_skb == skb)
+ break;
+
+ if (!ed)
+ return;
+
+ nss = ed->tx_rate_nss;
+ rate_idx = ed->tx_rate_idx;
+ ldpc = ed->tx_rate_ldpc;
switch (td->tx_rate_mode) {
case MT76_TM_TX_MODE_HT:
@@ -667,7 +689,7 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
rate_idx += 4;
r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
- val = cck ? r->hw_value_short : r->hw_value;
+ val = r->hw_value;
mode = val >> 8;
rate_idx = val & 0xff;
@@ -726,13 +748,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
if (mode >= MT_PHY_TYPE_HE_SU)
val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
val |= MT_TXD6_LDPC;
txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
+ if (phy->test.bf_en)
+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
+
txwi[6] |= cpu_to_le32(val);
- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
- phy->test.spe_idx));
#endif
}
@@ -1479,7 +1502,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
goto out;
/* set the necessary init items */
- ret = mt7915_mcu_set_eeprom(dev);
+ ret = mt7915_mcu_set_eeprom(dev, dev->flash_mode);
if (ret)
goto out;
diff --git a/mt7915/main.c b/mt7915/main.c
index e4d1c27b..ea0d22fe 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -238,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mvif->phy = phy;
mvif->mt76.band_idx = phy->mt76->band_idx;
- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
if (ext_phy)
mvif->mt76.wmm_idx += 2;
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index 6ec12fd2..4d878665 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -383,6 +383,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
case MCU_EXT_EVENT_BCC_NOTIFY:
mt7915_mcu_rx_bcc_notify(dev, skb);
break;
+#ifdef CONFIG_NL80211_TESTMODE
+ case MCU_EXT_EVENT_BF_STATUS_READ:
+ mt7915_tm_txbf_status_read(dev, skb);
+ break;
+#endif
default:
break;
}
@@ -414,6 +419,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
!rxd->seq)
mt7915_mcu_rx_unsolicited_event(dev, skb);
else
@@ -2847,14 +2853,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
return 0;
}
-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
{
struct mt7915_mcu_eeprom req = {
.buffer_mode = EE_MODE_EFUSE,
.format = EE_FORMAT_WHOLE,
};
- if (dev->flash_mode)
+ if (flash_mode)
return mt7915_mcu_set_eeprom_flash(dev);
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
diff --git a/mt7915/mcu.h b/mt7915/mcu.h
index a4b7ef82..1671d563 100644
--- a/mt7915/mcu.h
+++ b/mt7915/mcu.h
@@ -8,10 +8,15 @@
enum {
MCU_ATE_SET_TRX = 0x1,
+ MCU_ATE_SET_TSSI = 0x5,
+ MCU_ATE_SET_DPD = 0x6,
+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
+ MCU_ATE_SET_THERMAL_COMP = 0x8,
MCU_ATE_SET_FREQ_OFFSET = 0xa,
MCU_ATE_SET_PHY_COUNT = 0x11,
MCU_ATE_SET_SLOT_TIME = 0x13,
MCU_ATE_CLEAN_TXQUEUE = 0x1c,
+ MCU_ATE_SET_MU_RX_AID = 0x1e,
};
struct mt7915_mcu_thermal_ctrl {
@@ -472,6 +477,12 @@ enum {
enum {
MT_BF_SOUNDING_ON = 1,
+ MT_BF_DATA_PACKET_APPLY = 2,
+ MT_BF_PFMU_TAG_READ = 5,
+ MT_BF_PFMU_TAG_WRITE = 6,
+ MT_BF_PHASE_CAL = 14,
+ MT_BF_IBF_PHASE_COMP = 15,
+ MT_BF_PROFILE_WRITE_ALL = 17,
MT_BF_TYPE_UPDATE = 20,
MT_BF_MODULE_UPDATE = 25
};
@@ -718,10 +729,19 @@ struct mt7915_muru {
#define MURU_OFDMA_SCH_TYPE_UL BIT(1)
/* Common Config */
-#define MURU_COMM_PPDU_FMT BIT(0)
-#define MURU_COMM_SCH_TYPE BIT(1)
-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
-/* DL&UL User config*/
+/* #define MURU_COMM_PPDU_FMT BIT(0) */
+/* #define MURU_COMM_SCH_TYPE BIT(1) */
+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
+#define MURU_COMM_PPDU_FMT BIT(0)
+#define MURU_COMM_SCH_TYPE BIT(1)
+#define MURU_COMM_BAND BIT(2)
+#define MURU_COMM_WMM BIT(3)
+#define MURU_COMM_SPE_IDX BIT(4)
+#define MURU_COMM_PROC_TYPE BIT(5)
+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
+
+/* DL&UL User config */
#define MURU_USER_CNT BIT(4)
enum {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 6d8455d5..9a666d0f 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -134,6 +134,7 @@ static const u32 mt7915_offs[] = {
[ARB_DRNGR0] = 0x194,
[ARB_SCR] = 0x080,
[RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AALCR0] = 0x048,
[AGG_AWSCR0] = 0x05c,
[AGG_PCR0] = 0x06c,
[AGG_ACR0] = 0x084,
@@ -209,6 +210,7 @@ static const u32 mt7916_offs[] = {
[ARB_DRNGR0] = 0x1e0,
[ARB_SCR] = 0x000,
[RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AALCR0] = 0x028,
[AGG_AWSCR0] = 0x030,
[AGG_PCR0] = 0x040,
[AGG_ACR0] = 0x054,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 018fd23e..c45e42c5 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -321,6 +321,9 @@ struct mt7915_phy {
u8 last_snr;
u8 spe_idx;
+
+ bool bf_en;
+ bool bf_ever_en;
} test;
#endif
@@ -420,6 +423,14 @@ struct mt7915_dev {
void __iomem *dcm;
void __iomem *sku;
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ void *txbf_phase_cal;
+ void *txbf_pfmu_data;
+ void *txbf_pfmu_tag;
+ } test;
+#endif
+
#ifdef MTK_DEBUG
u16 wlan_idx;
struct {
@@ -591,7 +602,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
void *data, u32 field);
-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
@@ -629,6 +640,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
{
diff --git a/mt7915/regs.h b/mt7915/regs.h
index d6a05f13..e8768488 100644
--- a/mt7915/regs.h
+++ b/mt7915/regs.h
@@ -62,6 +62,7 @@ enum offs_rev {
ARB_DRNGR0,
ARB_SCR,
RMAC_MIB_AIRTIME14,
+ AGG_AALCR0,
AGG_AWSCR0,
AGG_PCR0,
AGG_ACR0,
@@ -482,6 +483,8 @@ enum offs_rev {
#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
+ (_n) * 4))
#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
(_n) * 4))
#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
diff --git a/mt7915/testmode.c b/mt7915/testmode.c
index 46939191..e0ba088f 100644
--- a/mt7915/testmode.c
+++ b/mt7915/testmode.c
@@ -9,6 +9,9 @@
enum {
TM_CHANGED_TXPOWER,
TM_CHANGED_FREQ_OFFSET,
+ TM_CHANGED_AID,
+ TM_CHANGED_CFG,
+ TM_CHANGED_TXBF_ACT,
/* must be last */
NUM_TM_CHANGED
@@ -17,6 +20,9 @@ enum {
static const u8 tm_change_map[] = {
[TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
[TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
};
struct reg_band {
@@ -33,6 +39,38 @@ struct reg_band {
#define TM_REG_MAX_ID 20
static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
+
+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
+ };
+
+ if (width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[width];
+}
+
+static void
+mt7915_tm_update_channel(struct mt7915_phy *phy)
+{
+ mutex_unlock(&phy->dev->mt76.mutex);
+ mt7915_set_channel(phy);
+ mutex_lock(&phy->dev->mt76.mutex);
+
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+
+ mt7915_tm_update_entry(phy);
+}
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
}
static int
-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
{
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
struct mt7915_dev *dev = phy->dev;
struct mt7915_tm_cmd req = {
.testmode_en = 1,
.param_idx = MCU_ATE_CLEAN_TXQUEUE,
- .param.clean.wcid = wcid,
.param.clean.band = phy->mt76->band_idx,
};
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
- sizeof(req), false);
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
+ int ret;
+
+ req.param.clean.wcid = wcid->idx;
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
+ &req, sizeof(req), false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int
@@ -141,7 +189,7 @@ mt7915_tm_set_phy_count(struct mt7915_phy *phy, u8 control)
.testmode_en = 1,
.param_idx = MCU_ATE_SET_PHY_COUNT,
.param.cfg.enable = control,
- .param.cfg.band = phy != &dev->phy,
+ .param.cfg.band = phy->mt76->band_idx,
};
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
}
+static int
+mt7915_tm_set_cfg(struct mt7915_phy *phy)
+{
+ static const u8 cfg_cmd[] = {
+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
+ };
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
+ .param_idx = cfg_cmd[td->cfg.type],
+ .param.cfg.enable = td->cfg.enable,
+ .param.cfg.band = phy->mt76->band_idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
+ u8 nc, bool ebf)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ struct sk_buff *skb;
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ u8 ndp_rate;
+
+ if (nr == 1)
+ ndp_rate = 8;
+ else if (nr == 2)
+ ndp_rate = 16;
+ else
+ ndp_rate = 24;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ bf = (struct sta_rec_bf *)tlv;
+
+ bf->pfmu = cpu_to_le16(pfmu_idx);
+ bf->sounding_phy = 1;
+ bf->bf_cap = ebf;
+ bf->ncol = nc;
+ bf->nrow = nr;
+ bf->ndp_rate = ndp_rate;
+ bf->ibf_timeout = 0xff;
+ bf->tx_mode = MT_PHY_TYPE_HT;
+
+ if (ebf) {
+ bf->mem[0].row = 0;
+ bf->mem[1].row = 1;
+ bf->mem[2].row = 2;
+ bf->mem[3].row = 3;
+ } else {
+ bf->mem[0].row = 4;
+ bf->mem[1].row = 5;
+ bf->mem[2].row = 6;
+ bf->mem[3].row = 7;
+ }
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+static int
+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_testmode_entry_data *ed;
+ struct ieee80211_sband_iftype_data *sdata;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+ int tid, ret;
+
+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
+ return -EINVAL;
+
+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
+ sizeof(*ed), GFP_KERNEL);
+ if (!sta)
+ return -ENOMEM;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
+ memcpy(ed, &td->ed, sizeof(*ed));
+
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
+ sband = &phy->mt76->sband_5g.sband;
+ sdata = phy->iftype[NL80211_BAND_5GHZ];
+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
+ sband = &phy->mt76->sband_6g.sband;
+ sdata = phy->iftype[NL80211_BAND_6GHZ];
+ } else {
+ sband = &phy->mt76->sband_2g.sband;
+ sdata = phy->iftype[NL80211_BAND_2GHZ];
+ }
+
+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
+ if (phy->test.bf_en) {
+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
+
+ memcpy(sta->addr, addr, ETH_ALEN);
+ }
+
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
+ memcpy(&sta->deflink.ht_cap, &sband->ht_cap, sizeof(sta->deflink.ht_cap));
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
+ memcpy(&sta->deflink.vht_cap, &sband->vht_cap, sizeof(sta->deflink.vht_cap));
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
+ memcpy(&sta->deflink.he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
+ sizeof(sta->deflink.he_cap));
+ sta->aid = aid;
+ sta->wme = 1;
+
+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
+ if (ret) {
+ kfree(sta);
+ return ret;
+ }
+
+ /* prevent from starting tx ba session */
+ for (tid = 0; tid < 8; tid++)
+ set_bit(tid, &msta->ampdu_state);
+
+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
+ td->entry_num++;
+
+ return 0;
+}
+
+static void
+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_wcid *wcid, *tmp;
+
+ if (list_empty(&td->tm_entry_list))
+ return;
+
+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
+ struct mt76_testmode_entry_data *ed;
+ struct mt7915_dev *dev = phy->dev;
+ struct ieee80211_sta *sta;
+
+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
+ if (aid && ed->aid != aid)
+ continue;
+
+ sta = wcid_to_sta(wcid);
+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
+
+ list_del_init(&wcid->list);
+ kfree(sta);
+ phy->mt76->test.entry_num--;
+ }
+}
+
+static int
+mt7915_tm_set_entry(struct mt7915_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
+
+ if (!td->aid) {
+ if (td->state > MT76_TM_STATE_IDLE)
+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
+ mt7915_tm_entry_remove(phy, td->aid);
+ return 0;
+ }
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
+ if (ed->aid == td->aid) {
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ skb = ed->tx_skb;
+ memcpy(ed, &td->ed, sizeof(*ed));
+ ed->tx_skb = skb;
+ local_bh_enable();
+
+ return 0;
+ }
+ }
+
+ return mt7915_tm_entry_add(phy, td->aid);
+}
+
+static void
+mt7915_tm_update_entry(struct mt7915_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_testmode_entry_data *ed, tmp;
+ struct mt76_wcid *wcid, *last;
+
+ if (!td->aid || phy->test.bf_en)
+ return;
+
+ memcpy(&tmp, &td->ed, sizeof(tmp));
+ last = list_last_entry(&td->tm_entry_list,
+ struct mt76_wcid, list);
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
+ memcpy(&td->ed, ed, sizeof(td->ed));
+ mt7915_tm_entry_remove(phy, td->aid);
+ mt7915_tm_entry_add(phy, td->aid);
+ if (wcid == last)
+ break;
+ }
+
+ memcpy(&td->ed, &tmp, sizeof(td->ed));
+}
+
+static int
+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt7915_dev *dev = phy->dev;
+ bool enable = val[0];
+ void *phase_cal, *pfmu_data, *pfmu_tag;
+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
+
+ if (!enable) {
+ phy->test.bf_en = 0;
+ return 0;
+ }
+
+ if (!dev->test.txbf_phase_cal) {
+ phase_cal = devm_kzalloc(dev->mt76.dev,
+ sizeof(struct mt7915_tm_txbf_phase) *
+ MAX_PHASE_GROUP_NUM,
+ GFP_KERNEL);
+ if (!phase_cal)
+ return -ENOMEM;
+
+ dev->test.txbf_phase_cal = phase_cal;
+ }
+
+ if (!dev->test.txbf_pfmu_data) {
+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
+ if (!pfmu_data)
+ return -ENOMEM;
+
+ dev->test.txbf_pfmu_data = pfmu_data;
+ }
+
+ if (!dev->test.txbf_pfmu_tag) {
+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
+ if (!pfmu_tag)
+ return -ENOMEM;
+
+ dev->test.txbf_pfmu_tag = pfmu_tag;
+ }
+
+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
+
+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
+ td->tx_mpdu_len = 1024;
+ td->tx_rate_sgi = 0;
+ td->tx_ipg = 100;
+ phy->test.bf_en = 1;
+
+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
+}
+
+static int
+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 category;
+ u8 wlan_idx_lo;
+ u8 bw;
+ u8 jp_band;
+ u8 dbdc_idx;
+ bool read_from_e2p;
+ bool disable;
+ u8 wlan_idx_hi;
+ u8 buf[40];
+ } __packed req = {
+ .category = MT_BF_IBF_PHASE_COMP,
+ .bw = val[0],
+ .jp_band = (val[2] == 1) ? 1 : 0,
+ .dbdc_idx = phy->mt76->band_idx,
+ .read_from_e2p = val[3],
+ .disable = val[4],
+ };
+ struct mt7915_tm_txbf_phase *phase =
+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
+
+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
+
+ pr_info("ibf cal process: phase comp info\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
+ &req, sizeof(req), 0);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 pfmu_idx;
+ bool bfer;
+ u8 dbdc_idx;
+ } __packed req = {
+ .format_id = MT_BF_PFMU_TAG_READ,
+ .pfmu_idx = pfmu_idx,
+ .bfer = 1,
+ .dbdc_idx = phy != &dev->phy,
+ };
+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
+
+ tag->t1.pfmu_idx = 0;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
+ struct mt7915_tm_pfmu_tag *tag)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 pfmu_idx;
+ bool bfer;
+ u8 dbdc_idx;
+ u8 buf[64];
+ } __packed req = {
+ .format_id = MT_BF_PFMU_TAG_WRITE,
+ .pfmu_idx = pfmu_idx,
+ .bfer = 1,
+ .dbdc_idx = phy != &dev->phy,
+ };
+
+ memcpy(req.buf, tag, sizeof(*tag));
+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
+ bool ibf, bool phase_cal)
+{
+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 category;
+ u8 wlan_idx_lo;
+ bool ebf;
+ bool ibf;
+ bool mu_txbf;
+ bool phase_cal;
+ u8 wlan_idx_hi;
+ u8 _rsv;
+ } __packed req = {
+ .category = MT_BF_DATA_PACKET_APPLY,
+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
+ .ebf = ebf,
+ .ibf = ibf,
+ .phase_cal = phase_cal,
+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), false);
+}
+
+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
+ struct mt76_wcid *wcid)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
+ struct sta_phy rate = {};
+
+ if (!sta)
+ return 0;
+
+ rate.type = MT_PHY_TYPE_HT;
+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
+ rate.nss = ed->tx_rate_nss;
+ rate.mcs = ed->tx_rate_idx;
+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
+
+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
+ &rate, RATE_PARAM_FIXED);
+}
+
+static int
+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
+{
+ bool bf_on = val[0], update = val[3];
+ /* u16 wlan_idx = val[2]; */
+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_wcid *wcid;
+
+ if (bf_on) {
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+ mt7915_tm_txbf_profile_tag_read(phy, 2);
+ tag->t1.invalid_prof = false;
+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
+
+ phy->test.bf_ever_en = true;
+
+ if (update)
+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
+ } else {
+ if (!phy->test.bf_ever_en) {
+ if (update)
+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
+ } else {
+ phy->test.bf_ever_en = false;
+
+ mt7915_tm_txbf_profile_tag_read(phy, 2);
+ tag->t1.invalid_prof = true;
+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
+ }
+ }
+
+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
+ mt7915_tm_txbf_set_rate(phy, wcid);
+
+ return 0;
+}
+
+static int
+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
+{
+ static const u8 mode_to_lm[] = {
+ [MT76_TM_TX_MODE_CCK] = 0,
+ [MT76_TM_TX_MODE_OFDM] = 0,
+ [MT76_TM_TX_MODE_HT] = 1,
+ [MT76_TM_TX_MODE_VHT] = 2,
+ [MT76_TM_TX_MODE_HE_SU] = 3,
+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
+ [MT76_TM_TX_MODE_HE_TB] = 3,
+ [MT76_TM_TX_MODE_HE_MU] = 3,
+ };
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_wcid *wcid;
+ struct ieee80211_vif *vif = phy->monitor_vif;
+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
+ u8 pfmu_idx = val[0], nc = val[2], nr;
+ int ret;
+
+ if (td->tx_antenna_mask == 3)
+ nr = 1;
+ else if (td->tx_antenna_mask == 7)
+ nr = 2;
+ else
+ nr = 3;
+
+ memset(tag, 0, sizeof(*tag));
+ tag->t1.pfmu_idx = pfmu_idx;
+ tag->t1.ebf = ebf;
+ tag->t1.nr = nr;
+ tag->t1.nc = nc;
+ tag->t1.invalid_prof = true;
+
+ tag->t1.snr_sts4 = 0xc0;
+ tag->t1.snr_sts5 = 0xff;
+ tag->t1.snr_sts6 = 0xff;
+ tag->t1.snr_sts7 = 0xff;
+
+ if (ebf) {
+ tag->t1.row_id1 = 0;
+ tag->t1.row_id2 = 1;
+ tag->t1.row_id3 = 2;
+ tag->t1.row_id4 = 3;
+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
+ } else {
+ tag->t1.row_id1 = 4;
+ tag->t1.row_id2 = 5;
+ tag->t1.row_id3 = 6;
+ tag->t1.row_id4 = 7;
+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
+
+ tag->t2.ibf_timeout = 0xff;
+ tag->t2.ibf_nr = nr;
+ }
+
+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
+ if (ret)
+ return ret;
+
+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
+ if (ret)
+ return ret;
+
+ if (!ebf)
+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
+
+ return 0;
+}
+
+static int
+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
+{
+#define GROUP_L 0
+#define GROUP_M 1
+#define GROUP_H 2
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 category;
+ u8 group_l_m_n;
+ u8 group;
+ bool sx2;
+ u8 cal_type;
+ u8 lna_gain_level;
+ u8 _rsv[2];
+ } __packed req = {
+ .category = MT_BF_PHASE_CAL,
+ .group = val[0],
+ .group_l_m_n = val[1],
+ .sx2 = val[2],
+ .cal_type = val[3],
+ .lna_gain_level = 0, /* for test purpose */
+ };
+ struct mt7915_tm_txbf_phase *phase =
+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
+
+ phase[req.group].status = 0;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), true);
+}
+
+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+#define BF_PFMU_TAG 16
+#define BF_CAL_PHASE 21
+ u8 format_id;
+
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+ format_id = *(u8 *)skb->data;
+
+ if (format_id == BF_PFMU_TAG) {
+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
+
+ skb_pull(skb, 8);
+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
+ } else if (format_id == BF_CAL_PHASE) {
+ struct mt7915_tm_ibf_cal_info *cal;
+ struct mt7915_tm_txbf_phase *phase =
+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
+
+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
+ switch (cal->cal_type) {
+ case IBF_PHASE_CAL_NORMAL:
+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
+ if (cal->group_l_m_n != GROUP_M)
+ break;
+ phase = &phase[cal->group];
+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
+ phase->status = cal->status;
+ break;
+ case IBF_PHASE_CAL_VERIFY:
+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
+ break;
+ default:
+ break;
+ }
+ }
+
+ wake_up(&dev->mt76.tx_wait);
+
+ return 0;
+}
+
+static int
+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ u16 pfmu_idx = val[0];
+ u16 subc_id = val[1];
+ u16 angle11 = val[2];
+ u16 angle21 = val[3];
+ u16 angle31 = val[4];
+ u16 angle41 = val[5];
+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
+ struct mt7915_tm_pfmu_data *pfmu_data;
+
+ if (subc_id > 63)
+ return -EINVAL;
+
+ if (td->tx_antenna_mask == 2) {
+ phi11 = (s16)(angle21 - angle11);
+ } else if (td->tx_antenna_mask == 3) {
+ phi11 = (s16)(angle31 - angle11);
+ phi21 = (s16)(angle31 - angle21);
+ } else {
+ phi11 = (s16)(angle41 - angle11);
+ phi21 = (s16)(angle41 - angle21);
+ phi31 = (s16)(angle41 - angle31);
+ }
+
+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
+ pfmu_data = &pfmu_data[subc_id];
+
+ if (subc_id < 32)
+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
+ else
+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
+ pfmu_data->phi11 = cpu_to_le16(phi11);
+ pfmu_data->phi21 = cpu_to_le16(phi21);
+ pfmu_data->phi31 = cpu_to_le16(phi31);
+
+ if (subc_id == 63) {
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 pfmu_idx;
+ u8 dbdc_idx;
+ u8 _rsv;
+ u8 buf[512];
+ } __packed req = {
+ .format_id = MT_BF_PROFILE_WRITE_ALL,
+ .pfmu_idx = pfmu_idx,
+ .dbdc_idx = phy != &dev->phy,
+ };
+
+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
+ &req, sizeof(req), true);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
+{
+ struct mt7915_tm_txbf_phase *phase, *p;
+ struct mt7915_dev *dev = phy->dev;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u16 offset;
+ bool is_7976;
+ int i;
+
+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
+ offset = is_7976 ? 0x60a : 0x651;
+
+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
+ p = &phase[i];
+
+ if (!p->status)
+ continue;
+
+ /* copy phase cal data to eeprom */
+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
+ sizeof(p->phase));
+ }
+
+ return 0;
+}
+
+static int
+mt7915_tm_set_txbf(struct mt7915_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ u16 *val = td->txbf_param;
+
+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
+
+ switch (td->txbf_act) {
+ case MT76_TM_TXBF_ACT_INIT:
+ return mt7915_tm_txbf_init(phy, val);
+ case MT76_TM_TXBF_ACT_UPDATE_CH:
+ mt7915_tm_update_channel(phy);
+ break;
+ case MT76_TM_TXBF_ACT_PHASE_COMP:
+ return mt7915_tm_txbf_phase_comp(phy, val);
+ case MT76_TM_TXBF_ACT_TX_PREP:
+ return mt7915_tm_txbf_set_tx(phy, val);
+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
+ return mt7915_tm_txbf_profile_update(phy, val, false);
+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
+ return mt7915_tm_txbf_profile_update(phy, val, true);
+ case MT76_TM_TXBF_ACT_PHASE_CAL:
+ return mt7915_tm_txbf_phase_cal(phy, val);
+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
+ return mt7915_tm_txbf_profile_update_all(phy, val);
+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
+ return mt7915_tm_txbf_e2p_update(phy);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int
mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
- u16 cw_max, u16 txop)
+ u16 cw_max, u16 txop, u8 tx_cmd)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
- struct mt7915_mcu_tx req = { .total = 1 };
+ struct mt7915_mcu_tx req = {
+ .valid = true,
+ .mode = tx_cmd,
+ .total = 1,
+ };
struct edca *e = &req.edca[0];
e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
@@ -263,7 +1037,8 @@ done:
return mt7915_tm_set_wmm_qid(phy,
mt76_connac_lmac_mapping(IEEE80211_AC_BE),
- aifsn, cw, cw, 0);
+ aifsn, cw, cw, 0,
+ mode == MT76_TM_TX_MODE_HE_MU);
}
static int
@@ -339,7 +1114,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
bitrate = cfg80211_calculate_bitrate(&rate);
tx_len = bitrate * tx_time / 10 / 8;
- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
if (ret)
return ret;
@@ -458,64 +1233,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
- if (!en)
+ if (!en) {
mt7915_tm_set_tam_arb(phy, en, 0);
+
+ phy->mt76->test.aid = 0;
+ phy->mt76->test.tx_mpdu_len = 0;
+ phy->test.bf_en = 0;
+ mt7915_tm_set_entry(phy);
+ }
+}
+
+static bool
+mt7915_tm_check_skb(struct mt7915_phy *phy)
+{
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
+ struct ieee80211_tx_info *info;
+
+ if (!ed->tx_skb)
+ return false;
+
+ info = IEEE80211_SKB_CB(ed->tx_skb);
+ info->control.vif = phy->monitor_vif;
+ }
+
+ return true;
+}
+
+static int
+mt7915_tm_set_ba(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_wcid *wcid;
+ struct ieee80211_vif *vif = phy->monitor_vif;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
+
+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
+ int tid, ret;
+
+ params.sta = wcid_to_sta(wcid);
+ for (tid = 0; tid < 8; tid++) {
+ params.tid = tid;
+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
+ 0x01010101);
+
+ return 0;
+}
+
+static int
+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
+{
+/* #define MURU_SET_MANUAL_CFG 100 */
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ __le32 cmd;
+ struct mt7915_tm_muru muru;
+ } __packed req = {
+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
+ };
+
+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct ieee80211_vif *vif = phy->monitor_vif;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_tm_muru muru = {};
+ struct mt7915_tm_muru_comm *comm = &muru.comm;
+ struct mt7915_tm_muru_dl *dl = &muru.dl;
+ int i;
+
+ comm->ppdu_format = MURU_PPDU_HE_MU;
+ comm->band = mvif->mt76.band_idx;
+ comm->wmm_idx = mvif->mt76.wmm_idx;
+ comm->spe_idx = phy->test.spe_idx;
+
+ dl->bw = mt7915_tm_chan_bw(chandef->width);
+ dl->gi = td->tx_rate_sgi;
+ dl->ltf = td->tx_ltf;
+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
+
+ for (i = 0; i < sizeof(dl->ru); i++)
+ dl->ru[i] = 0x71;
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
+
+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
+ dl_usr->ru_idx = ed->ru_idx;
+ dl_usr->mcs = ed->tx_rate_idx;
+ dl_usr->nss = ed->tx_rate_nss - 1;
+ dl_usr->ldpc = ed->tx_rate_ldpc;
+ dl->ru[dl->user_num] = ed->ru_alloc;
+
+ dl->user_num++;
+ }
+
+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
+
+ return mt7915_tm_set_muru_cfg(phy, &muru);
+}
+
+static int
+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
+{
+#define MURU_SET_TX_PKT_CNT 105
+#define MURU_SET_TX_EN 106
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ __le32 cmd;
+ u8 band;
+ u8 enable;
+ u8 _rsv[2];
+ __le32 tx_count;
+ } __packed req = {
+ .band = phy->mt76->band_idx,
+ .enable = enable,
+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
+ };
+ int ret;
+
+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
+ cpu_to_le32(MURU_SET_TX_EN);
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
+ sizeof(req), false);
+ if (ret)
+ return ret;
+
+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
+ sizeof(req), false);
}
static void
-mt7915_tm_update_channel(struct mt7915_phy *phy)
+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
{
- mutex_unlock(&phy->dev->mt76.mutex);
- mt7915_set_channel(phy);
- mutex_lock(&phy->dev->mt76.mutex);
+ struct mt76_testmode_data *td = &phy->mt76->test;
- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+ if (enable) {
+ struct mt7915_dev *dev = phy->dev;
+
+ mt7915_tm_set_ba(phy);
+ mt7915_tm_set_muru_dl(phy);
+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
+ } else {
+ /* set to zero for counting real tx free num */
+ td->tx_done = 0;
+ }
+
+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
+ usleep_range(100000, 200000);
}
static void
mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
{
struct mt76_testmode_data *td = &phy->mt76->test;
- struct mt7915_dev *dev = phy->dev;
- struct ieee80211_tx_info *info;
- u8 duty_cycle = td->tx_duty_cycle;
- u32 tx_time = td->tx_time;
- u32 ipg = td->tx_ipg;
mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
if (en) {
- mt7915_tm_update_channel(phy);
+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
+ u8 duty_cycle = td->tx_duty_cycle;
+
+ if (!phy->test.bf_en)
+ mt7915_tm_update_channel(phy);
if (td->tx_spe_idx)
phy->test.spe_idx = td->tx_spe_idx;
else
phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
- }
- mt7915_tm_set_tam_arb(phy, en,
- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
+ /* if all three params are set, duty_cycle will be ignored */
+ if (duty_cycle && tx_time && !ipg) {
+ ipg = tx_time * 100 / duty_cycle - tx_time;
+ } else if (duty_cycle && !tx_time && ipg) {
+ if (duty_cycle < 100)
+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
+ }
- /* if all three params are set, duty_cycle will be ignored */
- if (duty_cycle && tx_time && !ipg) {
- ipg = tx_time * 100 / duty_cycle - tx_time;
- } else if (duty_cycle && !tx_time && ipg) {
- if (duty_cycle < 100)
- tx_time = duty_cycle * ipg / (100 - duty_cycle);
- }
+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
+ mt7915_tm_set_tx_len(phy, tx_time);
- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
- mt7915_tm_set_tx_len(phy, tx_time);
+ if (ipg)
+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
- if (ipg)
- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
+ if (!mt7915_tm_check_skb(phy))
+ return;
+ } else {
+ mt7915_tm_clean_hwq(phy);
+ }
- if (!en || !td->tx_skb)
- return;
+ mt7915_tm_set_tam_arb(phy, en,
+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
- info = IEEE80211_SKB_CB(td->tx_skb);
- info->control.vif = phy->monitor_vif;
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+ mt7915_tm_tx_frames_mu(phy, en);
mt7915_tm_set_trx(phy, TM_MAC_TX, en);
}
@@ -544,10 +1482,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
return ret;
rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
if (!clear) {
enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
@@ -562,13 +1496,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
return 0;
}
+static int
+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76_testmode_entry_data *ed;
+ struct {
+ u8 band;
+ u8 _rsv;
+ __le16 wlan_idx;
+ } __packed req = {
+ .band = phy->mt76->band_idx,
+ };
+
+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
+ if (ed->aid == aid)
+ break;
+
+ if (!wcid)
+ return -EINVAL;
+
+ req.wlan_idx = cpu_to_le16(wcid->idx);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
+ &req, sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = 1,
+ .param_idx = MCU_ATE_SET_MU_RX_AID,
+ .param.rx_aid.band = cpu_to_le32(phy->mt76->band_idx),
+ .param.rx_aid.aid = cpu_to_le16(aid),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
static void
mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+
+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
if (en) {
- mt7915_tm_update_channel(phy);
+ if (!phy->test.bf_en)
+ mt7915_tm_update_channel(phy);
+ if (td->aid)
+ mt7915_tm_set_rx_user_idx(phy, td->aid);
/* read-clear */
mt7915_tm_get_rx_stats(phy, true);
@@ -576,9 +1558,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
/* clear fw count */
mt7915_tm_set_phy_count(phy, 0);
mt7915_tm_set_phy_count(phy, 1);
-
- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
}
+
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
+
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
}
static int
@@ -617,34 +1602,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
tx_cont->tx_ant = td->tx_antenna_mask;
tx_cont->band = band;
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_40:
- tx_cont->bw = CMD_CBW_40MHZ;
- break;
- case NL80211_CHAN_WIDTH_80:
- tx_cont->bw = CMD_CBW_80MHZ;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- tx_cont->bw = CMD_CBW_8080MHZ;
- break;
- case NL80211_CHAN_WIDTH_160:
- tx_cont->bw = CMD_CBW_160MHZ;
- break;
- case NL80211_CHAN_WIDTH_5:
- tx_cont->bw = CMD_CBW_5MHZ;
- break;
- case NL80211_CHAN_WIDTH_10:
- tx_cont->bw = CMD_CBW_10MHZ;
- break;
- case NL80211_CHAN_WIDTH_20:
- tx_cont->bw = CMD_CBW_20MHZ;
- break;
- case NL80211_CHAN_WIDTH_20_NOHT:
- tx_cont->bw = CMD_CBW_20MHZ;
- break;
- default:
- return -EINVAL;
- }
+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
if (!en) {
req.op.rf.param.func_data = cpu_to_le32(band);
@@ -728,6 +1686,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
if (changed & BIT(TM_CHANGED_TXPOWER))
mt7915_tm_set_tx_power(phy);
+ if (changed & BIT(TM_CHANGED_AID))
+ mt7915_tm_set_entry(phy);
+ if (changed & BIT(TM_CHANGED_CFG))
+ mt7915_tm_set_cfg(phy);
+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
+ mt7915_tm_set_txbf(phy);
}
static int
@@ -807,6 +1771,7 @@ static int
mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
struct mt7915_phy *phy = mphy->priv;
+ struct mt7915_dev *dev = phy->dev;
void *rx, *rssi;
int i;
@@ -852,11 +1817,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
+
return mt7915_tm_get_rx_stats(phy, false);
}
+static int
+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
+{
+ struct mt7915_mcu_eeprom_info req = {};
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int i, ret = -EINVAL;
+
+ /* prevent from damaging chip id in efuse */
+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
+ goto out;
+
+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
+ req.addr = cpu_to_le32(i);
+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
+ &req, sizeof(req), true);
+ if (ret)
+ return ret;
+ }
+
+out:
+ return ret;
+}
+
+static int
+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
+{
+ struct mt7915_phy *phy = mphy->priv;
+ struct mt7915_dev *dev = phy->dev;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int ret = 0;
+
+ if (offset >= mt7915_eeprom_size(dev))
+ return -EINVAL;
+
+ switch (action) {
+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
+ break;
+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
+ ret = mt7915_mcu_set_eeprom(dev, true);
+ break;
+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
+ ret = mt7915_tm_write_back_to_efuse(dev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
const struct mt76_testmode_ops mt7915_testmode_ops = {
.set_state = mt7915_tm_set_state,
.set_params = mt7915_tm_set_params,
.dump_stats = mt7915_tm_dump_stats,
+ .set_eeprom = mt7915_tm_set_eeprom,
};
diff --git a/mt7915/testmode.h b/mt7915/testmode.h
index a1c54c89..01b08e9e 100644
--- a/mt7915/testmode.h
+++ b/mt7915/testmode.h
@@ -4,6 +4,8 @@
#ifndef __MT7915_TESTMODE_H
#define __MT7915_TESTMODE_H
+#include "mcu.h"
+
struct mt7915_tm_trx {
u8 type;
u8 enable;
@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
u8 _rsv[2];
};
+struct mt7915_tm_mu_rx_aid {
+ __le32 band;
+ __le16 aid;
+};
+
struct mt7915_tm_cmd {
u8 testmode_en;
u8 param_idx;
@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
struct mt7915_tm_slot_time slot;
struct mt7915_tm_clean_txq clean;
struct mt7915_tm_cfg cfg;
+ struct mt7915_tm_mu_rx_aid rx_aid;
u8 test[72];
} param;
} __packed;
@@ -109,6 +117,16 @@ enum {
TAM_ARB_OP_MODE_FORCE_SU = 5,
};
+enum {
+ TM_CBW_20MHZ,
+ TM_CBW_40MHZ,
+ TM_CBW_80MHZ,
+ TM_CBW_10MHZ,
+ TM_CBW_5MHZ,
+ TM_CBW_160MHZ,
+ TM_CBW_8080MHZ,
+};
+
struct mt7915_tm_rx_stat_band {
u8 category;
@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
__le16 mdrdy_cnt_ofdm;
};
+struct mt7915_tm_muru_comm {
+ u8 ppdu_format;
+ u8 sch_type;
+ u8 band;
+ u8 wmm_idx;
+ u8 spe_idx;
+ u8 proc_type;
+};
+
+struct mt7915_tm_muru_dl_usr {
+ __le16 wlan_idx;
+ u8 ru_alloc_seg;
+ u8 ru_idx;
+ u8 ldpc;
+ u8 nss;
+ u8 mcs;
+ u8 mu_group_idx;
+ u8 vht_groud_id;
+ u8 vht_up;
+ u8 he_start_stream;
+ u8 he_mu_spatial;
+ u8 ack_policy;
+ __le16 tx_power_alpha;
+};
+
+struct mt7915_tm_muru_dl {
+ u8 user_num;
+ u8 tx_mode;
+ u8 bw;
+ u8 gi;
+ u8 ltf;
+ /* sigB */
+ u8 mcs;
+ u8 dcm;
+ u8 cmprs;
+
+ u8 tx_power;
+ u8 ru[8];
+ u8 c26[2];
+ u8 ack_policy;
+
+ struct mt7915_tm_muru_dl_usr usr[16];
+};
+
+struct mt7915_tm_muru_ul_usr {
+ __le16 wlan_idx;
+ u8 ru_alloc;
+ u8 ru_idx;
+ u8 ldpc;
+ u8 nss;
+ u8 mcs;
+ u8 target_rssi;
+ __le32 trig_pkt_size;
+};
+
+struct mt7915_tm_muru_ul {
+ u8 user_num;
+
+ /* UL TX */
+ u8 trig_type;
+ __le16 trig_cnt;
+ __le16 trig_intv;
+ u8 bw;
+ u8 gi_ltf;
+ __le16 ul_len;
+ u8 pad;
+ u8 trig_ta[ETH_ALEN];
+ u8 ru[8];
+ u8 c26[2];
+
+ struct mt7915_tm_muru_ul_usr usr[16];
+ /* HE TB RX Debug */
+ __le32 rx_hetb_nonsf_en_bitmap;
+ __le32 rx_hetb_cfg[2];
+
+ /* DL TX */
+ u8 ba_type;
+};
+
+struct mt7915_tm_muru {
+ __le32 cfg_comm;
+ __le32 cfg_dl;
+ __le32 cfg_ul;
+
+ struct mt7915_tm_muru_comm comm;
+ struct mt7915_tm_muru_dl dl;
+ struct mt7915_tm_muru_ul ul;
+};
+
+#define MURU_PPDU_HE_MU BIT(3)
+
+/* Common Config */
+/* #define MURU_COMM_PPDU_FMT BIT(0) */
+/* #define MURU_COMM_SCH_TYPE BIT(1) */
+/* #define MURU_COMM_BAND BIT(2) */
+/* #define MURU_COMM_WMM BIT(3) */
+/* #define MURU_COMM_SPE_IDX BIT(4) */
+/* #define MURU_COMM_PROC_TYPE BIT(5) */
+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
+/* DL Config */
+#define MURU_DL_BW BIT(0)
+#define MURU_DL_GI BIT(1)
+#define MURU_DL_TX_MODE BIT(2)
+#define MURU_DL_TONE_PLAN BIT(3)
+#define MURU_DL_USER_CNT BIT(4)
+#define MURU_DL_LTF BIT(5)
+#define MURU_DL_SIGB_MCS BIT(6)
+#define MURU_DL_SIGB_DCM BIT(7)
+#define MURU_DL_SIGB_CMPRS BIT(8)
+#define MURU_DL_ACK_POLICY BIT(9)
+#define MURU_DL_TXPOWER BIT(10)
+/* DL Per User Config */
+#define MURU_DL_USER_WLAN_ID BIT(16)
+#define MURU_DL_USER_COD BIT(17)
+#define MURU_DL_USER_MCS BIT(18)
+#define MURU_DL_USER_NSS BIT(19)
+#define MURU_DL_USER_RU_ALLOC BIT(20)
+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
+#define MURU_DL_USER_ACK_POLICY BIT(23)
+#define MURU_DL_USER_MUMIMO_HE BIT(24)
+#define MURU_DL_USER_PWR_ALPHA BIT(25)
+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
+
+#define MAX_PHASE_GROUP_NUM 9
+
+struct mt7915_tm_txbf_phase {
+ u8 status;
+ struct {
+ u8 r0_uh;
+ u8 r0_h;
+ u8 r0_m;
+ u8 r0_l;
+ u8 r0_ul;
+ u8 r1_uh;
+ u8 r1_h;
+ u8 r1_m;
+ u8 r1_l;
+ u8 r1_ul;
+ u8 r2_uh;
+ u8 r2_h;
+ u8 r2_m;
+ u8 r2_l;
+ u8 r2_ul;
+ u8 r3_uh;
+ u8 r3_h;
+ u8 r3_m;
+ u8 r3_l;
+ u8 r3_ul;
+ u8 r2_uh_sx2;
+ u8 r2_h_sx2;
+ u8 r2_m_sx2;
+ u8 r2_l_sx2;
+ u8 r2_ul_sx2;
+ u8 r3_uh_sx2;
+ u8 r3_h_sx2;
+ u8 r3_m_sx2;
+ u8 r3_l_sx2;
+ u8 r3_ul_sx2;
+ u8 m_t0_h;
+ u8 m_t1_h;
+ u8 m_t2_h;
+ u8 m_t2_h_sx2;
+ u8 r0_reserved;
+ u8 r1_reserved;
+ u8 r2_reserved;
+ u8 r3_reserved;
+ u8 r2_sx2_reserved;
+ u8 r3_sx2_reserved;
+ } phase;
+};
+
+struct mt7915_tm_pfmu_tag1 {
+ __le32 pfmu_idx:10;
+ __le32 ebf:1;
+ __le32 data_bw:2;
+ __le32 lm:2;
+ __le32 is_mu:1;
+ __le32 nr:3, nc:3;
+ __le32 codebook:2;
+ __le32 ngroup:2;
+ __le32 _rsv:2;
+ __le32 invalid_prof:1;
+ __le32 rmsd:3;
+
+ __le32 col_id1:6, row_id1:10;
+ __le32 col_id2:6, row_id2:10;
+ __le32 col_id3:6, row_id3:10;
+ __le32 col_id4:6, row_id4:10;
+
+ __le32 ru_start_id:7;
+ __le32 _rsv1:1;
+ __le32 ru_end_id:7;
+ __le32 _rsv2:1;
+ __le32 mob_cal_en:1;
+ __le32 _rsv3:15;
+
+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
+
+ __le32 _rsv4;
+} __packed;
+
+struct mt7915_tm_pfmu_tag2 {
+ __le32 smart_ant:24;
+ __le32 se_idx:5;
+ __le32 _rsv:3;
+
+ __le32 _rsv1:8;
+ __le32 rmsd_thres:3;
+ __le32 _rsv2:5;
+ __le32 ibf_timeout:8;
+ __le32 _rsv3:8;
+
+ __le32 _rsv4:16;
+ __le32 ibf_data_bw:2;
+ __le32 ibf_nc:3;
+ __le32 ibf_nr:3;
+ __le32 ibf_ru:8;
+
+ __le32 mob_delta_t:8;
+ __le32 mob_lq_result:7;
+ __le32 _rsv5:1;
+ __le32 _rsv6:16;
+
+ __le32 _rsv7;
+} __packed;
+
+struct mt7915_tm_pfmu_tag {
+ struct mt7915_tm_pfmu_tag1 t1;
+ struct mt7915_tm_pfmu_tag2 t2;
+};
+
+struct mt7915_tm_pfmu_data {
+ __le16 subc_idx;
+ __le16 phi11;
+ __le16 phi21;
+ __le16 phi31;
+};
+
+struct mt7915_tm_ibf_cal_info {
+ u8 format_id;
+ u8 group_l_m_n;
+ u8 group;
+ bool sx2;
+ u8 status;
+ u8 cal_type;
+ u8 _rsv[2];
+ u8 buf[1000];
+} __packed;
+
+enum {
+ IBF_PHASE_CAL_UNSPEC,
+ IBF_PHASE_CAL_NORMAL,
+ IBF_PHASE_CAL_VERIFY,
+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
+};
+
#endif
diff --git a/testmode.c b/testmode.c
index 1d0d5d30..7a9ed543 100644
--- a/testmode.c
+++ b/testmode.c
@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
};
EXPORT_SYMBOL_GPL(mt76_tm_policy);
-void mt76_testmode_tx_pending(struct mt76_phy *phy)
+static void
+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ struct sk_buff *skb, struct mt76_queue *q, int qid,
+ u16 limit)
{
struct mt76_testmode_data *td = &phy->test;
struct mt76_dev *dev = phy->dev;
- struct mt76_wcid *wcid = &dev->global_wcid;
- struct sk_buff *skb = td->tx_skb;
- struct mt76_queue *q;
- u16 tx_queued_limit;
- int qid;
-
- if (!skb || !td->tx_pending)
- return;
+ u16 count = limit;
- qid = skb_get_queue_mapping(skb);
- q = phy->q_tx[qid];
-
- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
-
- spin_lock_bh(&q->lock);
-
- while (td->tx_pending > 0 &&
- td->tx_queued - td->tx_done < tx_queued_limit &&
+ while (td->tx_pending > 0 && count &&
q->queued < q->ndesc / 2) {
int ret;
@@ -57,13 +45,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
if (ret < 0)
break;
+ count--;
td->tx_pending--;
td->tx_queued++;
+
+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
+ if (td->tx_queued - td->tx_done >= limit)
+ break;
}
dev->queue_ops->kick(dev, q);
+}
+
+void mt76_testmode_tx_pending(struct mt76_phy *phy)
+{
+ struct mt76_testmode_data *td = &phy->test;
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_queue *q;
+ int qid;
+ u16 tx_queued_limit;
+ u32 remain;
+ bool is_mu;
+
+ if (!td->tx_pending)
+ return;
+
+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
+ tx_queued_limit = 100;
+
+ if (!td->aid) {
+ qid = skb_get_queue_mapping(td->tx_skb);
+ q = phy->q_tx[qid];
+ spin_lock_bh(&q->lock);
+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
+ td->tx_skb, q, qid, tx_queued_limit);
+ spin_unlock_bh(&q->lock);
+
+ return;
+ }
+
+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
+ qid = skb_get_queue_mapping(ed->tx_skb);
+ q = phy->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+
+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
+ if (remain < tx_queued_limit)
+ tx_queued_limit = remain;
+
+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
+
+ if (td->tx_pending % td->tx_count == 0 || is_mu)
+ td->cur_entry = list_next_entry(td->cur_entry, list);
spin_unlock_bh(&q->lock);
+
+ if (is_mu && td->tx_pending)
+ mt76_worker_schedule(&phy->dev->tx_worker);
}
static u32
@@ -89,15 +129,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
}
static void
-mt76_testmode_free_skb(struct mt76_phy *phy)
+mt76_testmode_free_skb(struct sk_buff **tx_skb)
+{
+ if (!(*tx_skb))
+ return;
+
+ dev_kfree_skb(*tx_skb);
+ *tx_skb = NULL;
+}
+
+static void
+mt76_testmode_free_skb_all(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
+ struct mt76_testmode_entry_data *ed = &td->ed;
+ struct mt76_wcid *wcid;
+
+ mt76_testmode_free_skb(&ed->tx_skb);
- dev_kfree_skb(td->tx_skb);
- td->tx_skb = NULL;
+ mt76_tm_for_each_entry(phy, wcid, ed)
+ mt76_testmode_free_skb(&ed->tx_skb);
}
-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
+static int
+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
{
#define MT_TXP_MAX_LEN 4095
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
@@ -118,7 +174,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
nfrags = len / MT_TXP_MAX_LEN;
head_len = nfrags ? MT_TXP_MAX_LEN : len;
- if (len > IEEE80211_MAX_FRAME_LEN)
+ if (len > IEEE80211_MAX_FRAME_LEN ||
+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
fc |= IEEE80211_STYPE_QOS_DATA;
head = alloc_skb(head_len, GFP_KERNEL);
@@ -127,9 +184,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
hdr = __skb_put_zero(head, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(fc);
- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
head_len - sizeof(*hdr));
@@ -153,7 +210,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
frag = alloc_skb(frag_len, GFP_KERNEL);
if (!frag) {
- mt76_testmode_free_skb(phy);
+ mt76_testmode_free_skb(tx_skb);
dev_kfree_skb(head);
return -ENOMEM;
}
@@ -166,15 +223,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
frag_tail = &(*frag_tail)->next;
}
- mt76_testmode_free_skb(phy);
- td->tx_skb = head;
+ mt76_testmode_free_skb(tx_skb);
+ *tx_skb = head;
return 0;
}
-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
-static int
-mt76_testmode_tx_init(struct mt76_phy *phy)
+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
{
struct mt76_testmode_data *td = &phy->test;
struct ieee80211_tx_info *info;
@@ -182,7 +238,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
u8 max_nss = hweight8(phy->antenna_mask);
int ret;
- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
if (ret)
return ret;
@@ -192,7 +248,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
- info = IEEE80211_SKB_CB(td->tx_skb);
+ info = IEEE80211_SKB_CB(*tx_skb);
rate = &info->control.rates[0];
rate->count = 1;
rate->idx = td->tx_rate_idx;
@@ -264,6 +320,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
out:
return 0;
}
+EXPORT_SYMBOL(mt76_testmode_init_skb);
+
+static int
+mt76_testmode_tx_init(struct mt76_phy *phy)
+{
+ struct mt76_testmode_entry_data *ed;
+ struct mt76_wcid *wcid;
+
+ mt76_tm_for_each_entry(phy, wcid, ed) {
+ int ret;
+
+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
+ &ed->tx_skb, ed->addr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
static void
mt76_testmode_tx_start(struct mt76_phy *phy)
@@ -274,6 +349,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
td->tx_queued = 0;
td->tx_done = 0;
td->tx_pending = td->tx_count;
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+ td->tx_pending = 1;
+ if (td->entry_num) {
+ td->tx_pending *= td->entry_num;
+ td->cur_entry = list_first_entry(&td->tm_entry_list,
+ struct mt76_wcid, list);
+ }
+
mt76_worker_schedule(&dev->tx_worker);
}
@@ -292,7 +375,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
MT76_TM_TIMEOUT * HZ);
- mt76_testmode_free_skb(phy);
+ mt76_testmode_free_skb_all(phy);
}
static inline void
@@ -323,6 +406,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
+
+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
}
static int
@@ -332,8 +417,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
struct mt76_dev *dev = phy->dev;
int err;
- if (prev_state == MT76_TM_STATE_TX_FRAMES)
+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
+ /* MU needs to flush the hwq to collect tx free done events */
+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
mt76_testmode_tx_stop(phy);
+ }
if (state == MT76_TM_STATE_TX_FRAMES) {
err = mt76_testmode_tx_init(phy);
@@ -403,6 +492,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
return 0;
}
+static int
+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
+{
+ struct mt76_dev *dev = phy->dev;
+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
+ u32 offset = 0;
+ int err = -EINVAL;
+
+ if (!dev->test_ops->set_eeprom)
+ return -EOPNOTSUPP;
+
+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
+ 0, MT76_TM_EEPROM_ACTION_MAX))
+ goto out;
+
+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
+ struct nlattr *cur;
+ int rem, idx = 0;
+
+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
+ !tb[MT76_TM_ATTR_EEPROM_VAL])
+ goto out;
+
+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
+ goto out;
+
+ val[idx++] = nla_get_u8(cur);
+ }
+ }
+
+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
+
+out:
+ return err;
+}
+
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
@@ -426,6 +553,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
+ err = mt76_testmode_set_eeprom(phy, tb);
+ goto out;
+ }
+
if (tb[MT76_TM_ATTR_RESET]) {
mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
memset(td, 0, sizeof(*td));
@@ -452,7 +584,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
&td->tx_duty_cycle, 0, 99) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
- &td->tx_power_control, 0, 1))
+ &td->tx_power_control, 0, 1) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
goto out;
if (tb[MT76_TM_ATTR_TX_LENGTH]) {
@@ -484,8 +619,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (tb[MT76_TM_ATTR_TX_POWER]) {
struct nlattr *cur;
- int idx = 0;
- int rem;
+ int rem, idx = 0;
nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
if (nla_len(cur) != 1 ||
@@ -505,11 +639,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (nla_len(cur) != ETH_ALEN || idx >= 3)
goto out;
- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
+ }
+ }
+
+ if (tb[MT76_TM_ATTR_CFG]) {
+ struct nlattr *cur;
+ int rem, idx = 0;
+
+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
+ if (nla_len(cur) != 1 || idx >= 2)
+ goto out;
+
+ if (idx == 0)
+ td->cfg.type = nla_get_u8(cur);
+ else
+ td->cfg.enable = nla_get_u8(cur);
idx++;
}
}
+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
+ struct nlattr *cur;
+ int rem, idx = 0;
+
+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
+ 0, MT76_TM_TXBF_ACT_MAX))
+ goto out;
+
+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
+ if (nla_len(cur) != 2 ||
+ idx >= ARRAY_SIZE(td->txbf_param))
+ goto out;
+
+ td->txbf_param[idx++] = nla_get_u16(cur);
+ }
+ }
+
if (dev->test_ops->set_params) {
err = dev->test_ops->set_params(phy, tb, state);
if (err)
@@ -574,6 +742,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &phy->test;
+ struct mt76_testmode_entry_data *ed = &td->ed;
struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
int err = 0;
void *a;
@@ -606,6 +775,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
}
+ if (tb[MT76_TM_ATTR_AID]) {
+ struct mt76_wcid *wcid;
+ u8 aid;
+
+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
+ if (err)
+ goto out;
+
+ mt76_tm_for_each_entry(phy, wcid, ed)
+ if (ed->aid == aid)
+ ed = mt76_testmode_entry_data(phy, wcid);
+ }
+
mt76_testmode_init_defaults(phy);
err = -EMSGSIZE;
@@ -618,12 +800,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
(mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
@@ -643,6 +821,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
goto out;
+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
+ goto out;
+
if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
if (!a)
diff --git a/testmode.h b/testmode.h
index 89613266..57949f2b 100644
--- a/testmode.h
+++ b/testmode.h
@@ -6,6 +6,8 @@
#define __MT76_TESTMODE_H
#define MT76_TM_TIMEOUT 10
+#define MT76_TM_MAX_ENTRY_NUM 16
+#define MT76_TM_EEPROM_BLOCK_SIZE 16
/**
* enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
@@ -47,6 +49,15 @@
* @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
*
* @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
+ *
+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting action
+ * (u8, see &enum mt76_testmode_eeprom_action)
+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of the eeprom data block to write (u32)
+ * @MT76_TM_ATTR_EEPROM_VAL: values to write into a 16-byte data block
+ * (nested, u8 attrs)
+ *
+ * @MT76_TM_ATTR_CFG: configure a testmode rf feature (nested, see &enum mt76_testmode_cfg)
+ *
*/
enum mt76_testmode_attr {
MT76_TM_ATTR_UNSPEC,
@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
MT76_TM_ATTR_DRV_DATA,
MT76_TM_ATTR_MAC_ADDRS,
+ MT76_TM_ATTR_AID,
+ MT76_TM_ATTR_RU_ALLOC,
+ MT76_TM_ATTR_RU_IDX,
+
+ MT76_TM_ATTR_EEPROM_ACTION,
+ MT76_TM_ATTR_EEPROM_OFFSET,
+ MT76_TM_ATTR_EEPROM_VAL,
+
+ MT76_TM_ATTR_CFG,
+ MT76_TM_ATTR_TXBF_ACT,
+ MT76_TM_ATTR_TXBF_PARAM,
/* keep last */
NUM_MT76_TM_ATTRS,
@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
+/**
+ * enum mt76_testmode_eeprom_action - eeprom setting actions
+ *
+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values in a specific
+ * eeprom data block
+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
+ */
+enum mt76_testmode_eeprom_action {
+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
+
+ /* keep last */
+ NUM_MT76_TM_EEPROM_ACTION,
+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
+};
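
For reference, a userspace sketch of how these attributes could be packed with libnl is shown below. It assumes "msg" already carries a prepared NL80211_CMD_TESTMODE request for the target phy; the helper name tm_eeprom_update_block() is hypothetical. The kernel side (mt76_testmode_set_eeprom) requires the offset to be a multiple of MT76_TM_EEPROM_BLOCK_SIZE and rejects a request that supplies an offset without MT76_TM_ATTR_EEPROM_VAL.

#include <errno.h>
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>
#include "testmode.h"

/* Hypothetical helper: queue an update of one 16-byte eeprom block.
 * "offset" must be a multiple of MT76_TM_EEPROM_BLOCK_SIZE.
 */
static int tm_eeprom_update_block(struct nl_msg *msg, uint32_t offset,
				  const uint8_t *block)
{
	struct nlattr *tmdata, *val;
	int i;

	tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	if (!tmdata)
		return -ENOMEM;

	nla_put_u8(msg, MT76_TM_ATTR_EEPROM_ACTION,
		   MT76_TM_EEPROM_ACTION_UPDATE_DATA);
	nla_put_u32(msg, MT76_TM_ATTR_EEPROM_OFFSET, offset);

	val = nla_nest_start(msg, MT76_TM_ATTR_EEPROM_VAL);
	if (!val)
		return -ENOMEM;
	for (i = 0; i < MT76_TM_EEPROM_BLOCK_SIZE; i++)
		nla_put_u8(msg, i, block[i]);
	nla_nest_end(msg, val);

	nla_nest_end(msg, tmdata);
	return 0;
}

A follow-up request with MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE then pushes the updated eeprom data to the firmware, and MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE writes it back to efuse, matching the action descriptions above.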
+
+/**
+ * enum mt76_testmode_cfg - rf feature to configure via MT76_TM_ATTR_CFG
+ *
+ * @MT76_TM_CFG_TSSI: TSSI (transmit signal strength indication)
+ * @MT76_TM_CFG_DPD: DPD (digital pre-distortion)
+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate power offset
+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
+ */
+enum mt76_testmode_cfg {
+ MT76_TM_CFG_TSSI,
+ MT76_TM_CFG_DPD,
+ MT76_TM_CFG_RATE_POWER_OFFSET,
+ MT76_TM_CFG_THERMAL_COMP,
+
+ /* keep last */
+ NUM_MT76_TM_CFG,
+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
+};
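
MT76_TM_ATTR_CFG is parsed in mt76_testmode_cmd() as a nested pair of u8 attributes, the feature type followed by the enable flag, in that order. A minimal packing sketch is shown below; the helper name is hypothetical and it assumes the caller has already started the surrounding NL80211_ATTR_TESTDATA nest.

/* Hypothetical helper: enable/disable one rf feature. The two u8
 * attrs must appear in this order; the kernel fills td->cfg.type
 * from the first and td->cfg.enable from the second.
 */
static int tm_cfg_set(struct nl_msg *msg, enum mt76_testmode_cfg type,
		      bool enable)
{
	struct nlattr *cfg = nla_nest_start(msg, MT76_TM_ATTR_CFG);

	if (!cfg)
		return -ENOMEM;
	nla_put_u8(msg, 0, type);	/* idx 0: cfg.type */
	nla_put_u8(msg, 1, enable);	/* idx 1: cfg.enable */
	nla_nest_end(msg, cfg);
	return 0;
}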
+
+enum mt76_testmode_txbf_act {
+ MT76_TM_TXBF_ACT_INIT,
+ MT76_TM_TXBF_ACT_UPDATE_CH,
+ MT76_TM_TXBF_ACT_PHASE_COMP,
+ MT76_TM_TXBF_ACT_TX_PREP,
+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
+ MT76_TM_TXBF_ACT_PHASE_CAL,
+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
+ MT76_TM_TXBF_ACT_E2P_UPDATE,
+
+ /* keep last */
+ NUM_MT76_TM_TXBF_ACT,
+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
+};
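
MT76_TM_ATTR_TXBF_ACT selects one of the actions above and must be accompanied by MT76_TM_ATTR_TXBF_PARAM, a nested list of at most eight u16 attributes that the kernel copies verbatim into td->txbf_param[]. The sketch below shows one possible packing, again with a hypothetical helper, the same libnl calls as the eeprom sketch above, and no claim about the action-specific meaning of the parameters (that is handled in mt7915/testmode.c).

/* Hypothetical helper: send a txbf action with its raw u16 params.
 * Parameter semantics are action-specific; they are passed through
 * untouched here.
 */
static int tm_txbf_act(struct nl_msg *msg, enum mt76_testmode_txbf_act act,
		       const uint16_t *param, int n_param)
{
	struct nlattr *p;
	int i;

	if (n_param > 8)
		return -EINVAL;

	nla_put_u8(msg, MT76_TM_ATTR_TXBF_ACT, act);

	p = nla_nest_start(msg, MT76_TM_ATTR_TXBF_PARAM);
	if (!p)
		return -ENOMEM;
	for (i = 0; i < n_param; i++)
		nla_put_u16(msg, i, param[i]);
	nla_nest_end(msg, p);

	return 0;
}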
+
#endif
diff --git a/tools/fields.c b/tools/fields.c
index e3f69089..6e36ab27 100644
--- a/tools/fields.c
+++ b/tools/fields.c
@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
[MT76_TM_STATE_IDLE] = "idle",
[MT76_TM_STATE_TX_FRAMES] = "tx_frames",
[MT76_TM_STATE_RX_FRAMES] = "rx_frames",
+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
};
static const char * const testmode_tx_mode[] = {
@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
}
+static bool parse_mac(const struct tm_field *field, int idx,
+ struct nl_msg *msg, const char *val)
+{
+#define ETH_ALEN 6
+ bool ret = true;
+ char *str, *cur, *ap;
+ void *a;
+
+ ap = str = strdup(val);
+
+ a = nla_nest_start(msg, idx);
+
+ idx = 0;
+ while ((cur = strsep(&ap, ",")) != NULL) {
+ unsigned char addr[ETH_ALEN];
+ char *val, *tmp = cur;
+ int i = 0;
+
+ while ((val = strsep(&tmp, ":")) != NULL) {
+ if (i >= ETH_ALEN)
+ break;
+
+ addr[i++] = strtoul(val, NULL, 16);
+ }
+
+ nla_put(msg, idx, ETH_ALEN, addr);
+
+ idx++;
+ }
+
+ nla_nest_end(msg, a);
+
+ free(str);
+
+ return ret;
+}
+
+static void print_mac(const struct tm_field *field, struct nlattr *attr)
+{
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+ unsigned char addr[3][6];
+ struct nlattr *cur;
+ int idx = 0;
+ int rem;
+
+ nla_for_each_nested(cur, attr, rem) {
+ if (nla_len(cur) != 6)
+ continue;
+ memcpy(addr[idx++], nla_data(cur), 6);
+ }
+
+ printf("" MACSTR "," MACSTR "," MACSTR "",
+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
+
+ return;
+}
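
The new mac_addrs field maps onto MT76_TM_ATTR_MAC_ADDRS: parse_mac() accepts up to three comma-separated MAC addresses, each in colon-separated hex, and the kernel copies them into addr1/addr2/addr3 of the generated test frames (all three default to the phy MAC address). Assuming the tool's usual name=value syntax, a hypothetical value would look like:

  mac_addrs=00:11:22:33:44:01,00:11:22:33:44:02,00:11:22:33:44:03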
#define FIELD_GENERIC(_field, _name, ...) \
[FIELD_NAME(_field)] = { \
@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
##__VA_ARGS__ \
)
+#define FIELD_MAC(_field, _name) \
+ [FIELD_NAME(_field)] = { \
+ .name = _name, \
+ .parse = parse_mac, \
+ .print = print_mac \
+ }
+
#define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
FIELD(u8, TX_LTF, "tx_ltf"),
+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
+ FIELD(u32, TX_IPG, "tx_ipg"),
+ FIELD(u32, TX_TIME, "tx_time"),
FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
FIELD_ARRAY(u8, TX_POWER, "tx_power"),
FIELD(u8, TX_ANTENNA, "tx_antenna"),
+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
FIELD(u32, FREQ_OFFSET, "freq_offset"),
+ FIELD(u8, AID, "aid"),
+ FIELD(u8, RU_ALLOC, "ru_alloc"),
+ FIELD(u8, RU_IDX, "ru_idx"),
+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
FIELD_NESTED_RO(STATS, stats, "",
.print_extra = print_extra_stats),
};
@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
[MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
[MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
[MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
[MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
};
diff --git a/tx.c b/tx.c
index 1f309d05..6d55566f 100644
--- a/tx.c
+++ b/tx.c
@@ -250,8 +250,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
if (mt76_is_testmode_skb(dev, skb, &hw)) {
struct mt76_phy *phy = hw->priv;
- if (skb == phy->test.tx_skb)
- phy->test.tx_done++;
+ phy->test.tx_done++;
if (phy->test.tx_queued == phy->test.tx_done)
wake_up(&dev->tx_wait);
--
2.18.0