From 6a285836d16bb46349603858116df5305787e774 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1112/1132] mt76: testmode: additional supports

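Extend testmode support for MT7915/MT7916: add per-station test entries
for multi-user testing, TXBF (iBF/eBF) calibration, profile handling and
BF status events, HE MU-RU downlink TX, per-user RX statistics, runtime
calibration controls (TSSI, DPD, rate power offset, thermal compensation),
an eeprom set hook, and additional 5 GHz test channels.
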
Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
Signed-off-by: StanleyYP Wang <StanleyYP.Wang@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 ++++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   39 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   10 +-
 mt7915/mcu.h      |   28 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   14 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1177 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++++++++
 testmode.c        |  280 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   84 +++-
 tx.c              |    3 +-
 19 files changed, 1975 insertions(+), 151 deletions(-)

30diff --git a/dma.c b/dma.c
developer62713c82023-03-20 10:46:08 +080031index df2ca73..fe65e0f 100644
developer4c6b6002022-05-30 16:36:44 +080032--- a/dma.c
33+++ b/dma.c
developer62713c82023-03-20 10:46:08 +080034@@ -568,8 +568,7 @@ free:
developer4c6b6002022-05-30 16:36:44 +080035 if (mt76_is_testmode_skb(dev, skb, &hw)) {
36 struct mt76_phy *phy = hw->priv;
37
38- if (tx_info.skb == phy->test.tx_skb)
39- phy->test.tx_done--;
40+ phy->test.tx_done--;
41 }
42 #endif
43
44diff --git a/mac80211.c b/mac80211.c
developer62713c82023-03-20 10:46:08 +080045index ad5d6f8..9107c0c 100644
developer4c6b6002022-05-30 16:36:44 +080046--- a/mac80211.c
47+++ b/mac80211.c
developerc04f5402023-02-03 09:22:26 +080048@@ -56,6 +56,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer4c6b6002022-05-30 16:36:44 +080049 CHAN5G(60, 5300),
50 CHAN5G(64, 5320),
51
52+ CHAN5G(68, 5340),
53+ CHAN5G(80, 5400),
54+ CHAN5G(84, 5420),
55+ CHAN5G(88, 5440),
56+ CHAN5G(92, 5460),
57+ CHAN5G(96, 5480),
58+
59 CHAN5G(100, 5500),
60 CHAN5G(104, 5520),
61 CHAN5G(108, 5540),
developerc04f5402023-02-03 09:22:26 +080062@@ -76,6 +83,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer4c6b6002022-05-30 16:36:44 +080063 CHAN5G(165, 5825),
64 CHAN5G(169, 5845),
65 CHAN5G(173, 5865),
66+
67+ CHAN5G(184, 4920),
68+ CHAN5G(188, 4940),
69+ CHAN5G(192, 4960),
70+ CHAN5G(196, 4980),
71 };
72
73 static const struct ieee80211_channel mt76_channels_6ghz[] = {
74diff --git a/mt76.h b/mt76.h
developer62713c82023-03-20 10:46:08 +080075index ca66448..898726e 100644
developer4c6b6002022-05-30 16:36:44 +080076--- a/mt76.h
77+++ b/mt76.h
developer62713c82023-03-20 10:46:08 +080078@@ -642,6 +642,21 @@ struct mt76_testmode_ops {
developer4c6b6002022-05-30 16:36:44 +080079 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
80 enum mt76_testmode_state new_state);
81 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
82+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
83+};
84+
85+struct mt76_testmode_entry_data {
86+ struct sk_buff *tx_skb;
87+
88+ u16 tx_mpdu_len;
89+ u8 tx_rate_idx;
90+ u8 tx_rate_nss;
91+ u8 tx_rate_ldpc;
92+
93+ u8 addr[3][ETH_ALEN];
94+ u8 aid;
95+ u8 ru_alloc;
96+ u8 ru_idx;
97 };
98
99 #define MT_TM_FW_RX_COUNT BIT(0)
developer62713c82023-03-20 10:46:08 +0800100@@ -650,16 +665,11 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800101 enum mt76_testmode_state state;
102
103 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
104- struct sk_buff *tx_skb;
105
106 u32 tx_count;
107- u16 tx_mpdu_len;
108
109 u8 tx_rate_mode;
110- u8 tx_rate_idx;
111- u8 tx_rate_nss;
112 u8 tx_rate_sgi;
113- u8 tx_rate_ldpc;
114 u8 tx_rate_stbc;
115 u8 tx_ltf;
116
developer62713c82023-03-20 10:46:08 +0800117@@ -675,10 +685,37 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800118 u8 tx_power[4];
119 u8 tx_power_control;
120
121- u8 addr[3][ETH_ALEN];
122+ struct list_head tm_entry_list;
123+ struct mt76_wcid *cur_entry;
124+ u8 entry_num;
125+ union {
126+ struct mt76_testmode_entry_data ed;
127+ struct {
128+ /* must be the same as mt76_testmode_entry_data */
129+ struct sk_buff *tx_skb;
130+
131+ u16 tx_mpdu_len;
132+ u8 tx_rate_idx;
133+ u8 tx_rate_nss;
134+ u8 tx_rate_ldpc;
135+
136+ u8 addr[3][ETH_ALEN];
137+ u8 aid;
138+ u8 ru_alloc;
139+ u8 ru_idx;
140+ };
141+ };
142
143 u8 flag;
144
145+ struct {
146+ u8 type;
147+ u8 enable;
148+ } cfg;
149+
150+ u8 txbf_act;
151+ u16 txbf_param[8];
152+
153 u32 tx_pending;
154 u32 tx_queued;
155 u16 tx_queued_limit;
developer62713c82023-03-20 10:46:08 +0800156@@ -1142,6 +1179,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +0800157 #endif
158 }
159
160+#ifdef CONFIG_NL80211_TESTMODE
161+static inline struct mt76_wcid *
162+mt76_testmode_first_entry(struct mt76_phy *phy)
163+{
164+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
165+ return &phy->dev->global_wcid;
166+
167+ return list_first_entry(&phy->test.tm_entry_list,
168+ typeof(struct mt76_wcid),
169+ list);
170+}
171+
172+static inline struct mt76_testmode_entry_data *
173+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
174+{
175+ if (!wcid)
176+ return NULL;
177+ if (wcid == &phy->dev->global_wcid)
178+ return &phy->test.ed;
179+
180+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
181+ phy->hw->sta_data_size);
182+}
183+
184+#define mt76_tm_for_each_entry(phy, wcid, ed) \
185+ for (wcid = mt76_testmode_first_entry(phy), \
186+ ed = mt76_testmode_entry_data(phy, wcid); \
187+ ((phy->test.aid && \
188+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
189+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
190+ wcid = list_next_entry(wcid, list), \
191+ ed = mt76_testmode_entry_data(phy, wcid))
192+#endif
193+
194+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
195+ struct sk_buff *skb)
196+{
197+#ifdef CONFIG_NL80211_TESTMODE
198+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
199+ struct mt76_wcid *wcid;
200+
201+ if (skb == ed->tx_skb)
202+ return true;
203+
204+ mt76_tm_for_each_entry(phy, wcid, ed)
205+ if (skb == ed->tx_skb)
206+ return true;
207+ return false;
208+#else
209+ return false;
210+#endif
211+}
212+
213 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
214 struct sk_buff *skb,
215 struct ieee80211_hw **hw)
developer62713c82023-03-20 10:46:08 +0800216@@ -1152,7 +1242,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
developer1d9fede2022-08-29 15:24:07 +0800217 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
218 struct mt76_phy *phy = dev->phys[i];
219
220- if (phy && skb == phy->test.tx_skb) {
221+ if (phy && mt76_testmode_enabled(phy) &&
222+ __mt76_is_testmode_skb(phy, skb)) {
223 *hw = dev->phys[i]->hw;
224 return true;
225 }
developer62713c82023-03-20 10:46:08 +0800226@@ -1254,7 +1345,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +0800227 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
228 struct netlink_callback *cb, void *data, int len);
229 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
230-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
231+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
232+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
233
234 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
235 {
236diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
developer287ee9f2023-03-02 20:13:34 +0800237index 2fefac6..b6c2ccf 100644
developer4c6b6002022-05-30 16:36:44 +0800238--- a/mt76_connac_mcu.c
239+++ b/mt76_connac_mcu.c
developer144824b2022-11-25 21:27:43 +0800240@@ -394,6 +394,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
developer4c6b6002022-05-30 16:36:44 +0800241 switch (vif->type) {
242 case NL80211_IFTYPE_MESH_POINT:
243 case NL80211_IFTYPE_AP:
244+ case NL80211_IFTYPE_MONITOR:
245 if (vif->p2p)
246 conn_type = CONNECTION_P2P_GC;
247 else
developer144824b2022-11-25 21:27:43 +0800248@@ -575,6 +576,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800249 rx->rca2 = 1;
250 rx->rv = 1;
251
252+ if (vif->type == NL80211_IFTYPE_MONITOR)
253+ rx->rca1 = 0;
254+
255 if (!is_connac_v1(dev))
256 return;
257
258diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
developer62713c82023-03-20 10:46:08 +0800259index 8ba8ebf..4a63a24 100644
developer4c6b6002022-05-30 16:36:44 +0800260--- a/mt76_connac_mcu.h
261+++ b/mt76_connac_mcu.h
developer62713c82023-03-20 10:46:08 +0800262@@ -996,6 +996,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800263 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
264 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
265 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
266+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
267 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
268 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
269 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
developer62713c82023-03-20 10:46:08 +0800270@@ -1197,6 +1198,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800271 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
272 /* for vendor csi and air monitor */
273 MCU_EXT_CMD_SMESH_CTRL = 0xae,
274+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
275 MCU_EXT_CMD_CERT_CFG = 0xb7,
276 MCU_EXT_CMD_CSI_CTRL = 0xc2,
277 };
278diff --git a/mt7915/init.c b/mt7915/init.c
developer62713c82023-03-20 10:46:08 +0800279index 1177e4e..f40d09c 100644
developer4c6b6002022-05-30 16:36:44 +0800280--- a/mt7915/init.c
281+++ b/mt7915/init.c
developer62713c82023-03-20 10:46:08 +0800282@@ -695,7 +695,7 @@ static void mt7915_init_work(struct work_struct *work)
developer4c6b6002022-05-30 16:36:44 +0800283 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
284 init_work);
285
286- mt7915_mcu_set_eeprom(dev);
287+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
288 mt7915_mac_init(dev);
289 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
290 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
291diff --git a/mt7915/mac.c b/mt7915/mac.c
developer287ee9f2023-03-02 20:13:34 +0800292index de2bdba..1460a32 100644
developer4c6b6002022-05-30 16:36:44 +0800293--- a/mt7915/mac.c
294+++ b/mt7915/mac.c
developerc04f5402023-02-03 09:22:26 +0800295@@ -627,16 +627,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800296 {
297 #ifdef CONFIG_NL80211_TESTMODE
298 struct mt76_testmode_data *td = &phy->mt76->test;
299+ struct mt76_testmode_entry_data *ed;
300+ struct mt76_wcid *wcid;
301 const struct ieee80211_rate *r;
302- u8 bw, mode, nss = td->tx_rate_nss;
303- u8 rate_idx = td->tx_rate_idx;
304+ u8 bw, mode, nss, rate_idx, ldpc;
305 u16 rateval = 0;
306 u32 val;
307 bool cck = false;
308 int band;
309
310- if (skb != phy->mt76->test.tx_skb)
311+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
312+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
313+ phy->test.spe_idx));
314+
315+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
316+ txwi[1] |= cpu_to_le32(BIT(18));
317+ txwi[2] = 0;
318+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
319+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
320+
developerf40484f2022-08-25 15:33:33 +0800321 return;
developer4c6b6002022-05-30 16:36:44 +0800322+ }
323+
324+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
325+ if (ed->tx_skb == skb)
326+ break;
327+
328+ if (!ed)
developerf40484f2022-08-25 15:33:33 +0800329+ return;
330+
developer4c6b6002022-05-30 16:36:44 +0800331+ nss = ed->tx_rate_nss;
332+ rate_idx = ed->tx_rate_idx;
333+ ldpc = ed->tx_rate_ldpc;
developerf40484f2022-08-25 15:33:33 +0800334
developer4c6b6002022-05-30 16:36:44 +0800335 switch (td->tx_rate_mode) {
336 case MT76_TM_TX_MODE_HT:
developerc04f5402023-02-03 09:22:26 +0800337@@ -667,7 +689,7 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4feb1012023-01-30 17:29:07 +0800338 rate_idx += 4;
339
340 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
341- val = cck ? r->hw_value_short : r->hw_value;
342+ val = r->hw_value;
343
344 mode = val >> 8;
345 rate_idx = val & 0xff;
developerc04f5402023-02-03 09:22:26 +0800346@@ -726,13 +748,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800347 if (mode >= MT_PHY_TYPE_HE_SU)
348 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
349
350- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
351+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
352 val |= MT_TXD6_LDPC;
353
developerf40484f2022-08-25 15:33:33 +0800354 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
developer4c6b6002022-05-30 16:36:44 +0800355+ if (phy->test.bf_en)
356+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
357+
358 txwi[6] |= cpu_to_le32(val);
359- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
360- phy->test.spe_idx));
361 #endif
362 }
363
developerc04f5402023-02-03 09:22:26 +0800364@@ -1479,7 +1502,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
developer356ecec2022-11-14 10:25:04 +0800365 goto out;
366
367 /* set the necessary init items */
368- ret = mt7915_mcu_set_eeprom(dev);
369+ ret = mt7915_mcu_set_eeprom(dev, dev->flash_mode);
370 if (ret)
371 goto out;
372
developer4c6b6002022-05-30 16:36:44 +0800373diff --git a/mt7915/main.c b/mt7915/main.c
developer287ee9f2023-03-02 20:13:34 +0800374index f0cbfbe..651887c 100644
developer4c6b6002022-05-30 16:36:44 +0800375--- a/mt7915/main.c
376+++ b/mt7915/main.c
developer9851a292022-12-15 17:33:43 +0800377@@ -238,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
developer4c6b6002022-05-30 16:36:44 +0800378 mvif->phy = phy;
developereb6a0182022-12-12 18:53:32 +0800379 mvif->mt76.band_idx = phy->mt76->band_idx;
developer4c6b6002022-05-30 16:36:44 +0800380
381- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
382+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
383 if (ext_phy)
384 mvif->mt76.wmm_idx += 2;
385
386diff --git a/mt7915/mcu.c b/mt7915/mcu.c
developer62713c82023-03-20 10:46:08 +0800387index 7b7916c..ec695c7 100644
developer4c6b6002022-05-30 16:36:44 +0800388--- a/mt7915/mcu.c
389+++ b/mt7915/mcu.c
developereb6a0182022-12-12 18:53:32 +0800390@@ -383,6 +383,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800391 case MCU_EXT_EVENT_BCC_NOTIFY:
392 mt7915_mcu_rx_bcc_notify(dev, skb);
393 break;
394+#ifdef CONFIG_NL80211_TESTMODE
395+ case MCU_EXT_EVENT_BF_STATUS_READ:
396+ mt7915_tm_txbf_status_read(dev, skb);
397+ break;
398+#endif
399 default:
400 break;
401 }
developereb6a0182022-12-12 18:53:32 +0800402@@ -414,6 +419,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800403 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
404 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
405 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
406+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
407 !rxd->seq)
408 mt7915_mcu_rx_unsolicited_event(dev, skb);
409 else
developer62713c82023-03-20 10:46:08 +0800410@@ -2850,14 +2856,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
developer4c6b6002022-05-30 16:36:44 +0800411 return 0;
412 }
413
414-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
415+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
416 {
417 struct mt7915_mcu_eeprom req = {
418 .buffer_mode = EE_MODE_EFUSE,
419 .format = EE_FORMAT_WHOLE,
420 };
421
422- if (dev->flash_mode)
423+ if (flash_mode)
424 return mt7915_mcu_set_eeprom_flash(dev);
425
426 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
427diff --git a/mt7915/mcu.h b/mt7915/mcu.h
developer62713c82023-03-20 10:46:08 +0800428index 3c16d21..0a99cb3 100644
developer4c6b6002022-05-30 16:36:44 +0800429--- a/mt7915/mcu.h
430+++ b/mt7915/mcu.h
developerf64861f2022-06-22 11:44:53 +0800431@@ -8,10 +8,15 @@
developer4c6b6002022-05-30 16:36:44 +0800432
433 enum {
434 MCU_ATE_SET_TRX = 0x1,
435+ MCU_ATE_SET_TSSI = 0x5,
436+ MCU_ATE_SET_DPD = 0x6,
437+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
438+ MCU_ATE_SET_THERMAL_COMP = 0x8,
439 MCU_ATE_SET_FREQ_OFFSET = 0xa,
440 MCU_ATE_SET_PHY_COUNT = 0x11,
441 MCU_ATE_SET_SLOT_TIME = 0x13,
442 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
443+ MCU_ATE_SET_MU_RX_AID = 0x1e,
444 };
445
developerf64861f2022-06-22 11:44:53 +0800446 struct mt7915_mcu_thermal_ctrl {
developerc04f5402023-02-03 09:22:26 +0800447@@ -472,6 +477,12 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800448
449 enum {
450 MT_BF_SOUNDING_ON = 1,
451+ MT_BF_DATA_PACKET_APPLY = 2,
452+ MT_BF_PFMU_TAG_READ = 5,
453+ MT_BF_PFMU_TAG_WRITE = 6,
454+ MT_BF_PHASE_CAL = 14,
455+ MT_BF_IBF_PHASE_COMP = 15,
456+ MT_BF_PROFILE_WRITE_ALL = 17,
457 MT_BF_TYPE_UPDATE = 20,
458 MT_BF_MODULE_UPDATE = 25
459 };
developerc04f5402023-02-03 09:22:26 +0800460@@ -718,10 +729,19 @@ struct mt7915_muru {
developer4c6b6002022-05-30 16:36:44 +0800461 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
462
developerf64861f2022-06-22 11:44:53 +0800463 /* Common Config */
developer4c6b6002022-05-30 16:36:44 +0800464-#define MURU_COMM_PPDU_FMT BIT(0)
465-#define MURU_COMM_SCH_TYPE BIT(1)
466-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
developer4c6b6002022-05-30 16:36:44 +0800467-/* DL&UL User config*/
developer4c6b6002022-05-30 16:36:44 +0800468+/* #define MURU_COMM_PPDU_FMT BIT(0) */
469+/* #define MURU_COMM_SCH_TYPE BIT(1) */
470+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
developer4721e252022-06-21 16:41:28 +0800471+#define MURU_COMM_PPDU_FMT BIT(0)
472+#define MURU_COMM_SCH_TYPE BIT(1)
473+#define MURU_COMM_BAND BIT(2)
474+#define MURU_COMM_WMM BIT(3)
475+#define MURU_COMM_SPE_IDX BIT(4)
476+#define MURU_COMM_PROC_TYPE BIT(5)
477+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
478+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
developer4c6b6002022-05-30 16:36:44 +0800479+
480+/* DL&UL User config */
481 #define MURU_USER_CNT BIT(4)
482
483 enum {
484diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer62713c82023-03-20 10:46:08 +0800485index dbafb5f..bb752a8 100644
developer4c6b6002022-05-30 16:36:44 +0800486--- a/mt7915/mmio.c
487+++ b/mt7915/mmio.c
developerc04f5402023-02-03 09:22:26 +0800488@@ -134,6 +134,7 @@ static const u32 mt7915_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800489 [ARB_DRNGR0] = 0x194,
490 [ARB_SCR] = 0x080,
491 [RMAC_MIB_AIRTIME14] = 0x3b8,
492+ [AGG_AALCR0] = 0x048,
493 [AGG_AWSCR0] = 0x05c,
494 [AGG_PCR0] = 0x06c,
495 [AGG_ACR0] = 0x084,
developerc04f5402023-02-03 09:22:26 +0800496@@ -209,6 +210,7 @@ static const u32 mt7916_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800497 [ARB_DRNGR0] = 0x1e0,
498 [ARB_SCR] = 0x000,
499 [RMAC_MIB_AIRTIME14] = 0x0398,
500+ [AGG_AALCR0] = 0x028,
501 [AGG_AWSCR0] = 0x030,
502 [AGG_PCR0] = 0x040,
503 [AGG_ACR0] = 0x054,
504diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developer287ee9f2023-03-02 20:13:34 +0800505index 8b292c3..9d9c5fc 100644
developer4c6b6002022-05-30 16:36:44 +0800506--- a/mt7915/mt7915.h
507+++ b/mt7915/mt7915.h
developer287ee9f2023-03-02 20:13:34 +0800508@@ -323,6 +323,9 @@ struct mt7915_phy {
developer4c6b6002022-05-30 16:36:44 +0800509 u8 last_snr;
510
511 u8 spe_idx;
512+
513+ bool bf_en;
514+ bool bf_ever_en;
515 } test;
516 #endif
517
developer287ee9f2023-03-02 20:13:34 +0800518@@ -422,6 +425,14 @@ struct mt7915_dev {
developer4c6b6002022-05-30 16:36:44 +0800519 void __iomem *dcm;
520 void __iomem *sku;
521
522+#ifdef CONFIG_NL80211_TESTMODE
523+ struct {
524+ void *txbf_phase_cal;
525+ void *txbf_pfmu_data;
526+ void *txbf_pfmu_tag;
527+ } test;
528+#endif
529+
530 #ifdef MTK_DEBUG
531 u16 wlan_idx;
532 struct {
developer287ee9f2023-03-02 20:13:34 +0800533@@ -592,7 +603,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800534 struct ieee80211_vif *vif,
535 struct ieee80211_sta *sta,
536 void *data, u32 field);
537-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
538+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
539 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
540 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
541 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
developer287ee9f2023-03-02 20:13:34 +0800542@@ -630,6 +641,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
developer4c6b6002022-05-30 16:36:44 +0800543 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
544 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
545 void mt7915_mcu_exit(struct mt7915_dev *dev);
546+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
547
548 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
549 {
550diff --git a/mt7915/regs.h b/mt7915/regs.h
developer287ee9f2023-03-02 20:13:34 +0800551index d6a05f1..e876848 100644
developer4c6b6002022-05-30 16:36:44 +0800552--- a/mt7915/regs.h
553+++ b/mt7915/regs.h
developer144824b2022-11-25 21:27:43 +0800554@@ -62,6 +62,7 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800555 ARB_DRNGR0,
556 ARB_SCR,
557 RMAC_MIB_AIRTIME14,
558+ AGG_AALCR0,
559 AGG_AWSCR0,
560 AGG_PCR0,
561 AGG_ACR0,
developer144824b2022-11-25 21:27:43 +0800562@@ -482,6 +483,8 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800563 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
564 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
565
566+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
567+ (_n) * 4))
568 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
569 (_n) * 4))
570 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
571diff --git a/mt7915/testmode.c b/mt7915/testmode.c
developer287ee9f2023-03-02 20:13:34 +0800572index 4693919..9317a8e 100644
developer4c6b6002022-05-30 16:36:44 +0800573--- a/mt7915/testmode.c
574+++ b/mt7915/testmode.c
575@@ -9,6 +9,9 @@
576 enum {
577 TM_CHANGED_TXPOWER,
578 TM_CHANGED_FREQ_OFFSET,
579+ TM_CHANGED_AID,
580+ TM_CHANGED_CFG,
581+ TM_CHANGED_TXBF_ACT,
582
583 /* must be last */
584 NUM_TM_CHANGED
585@@ -17,6 +20,9 @@ enum {
586 static const u8 tm_change_map[] = {
587 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
588 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
589+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
590+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
591+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
592 };
593
594 struct reg_band {
developerc6f56bb2022-06-14 18:36:30 +0800595@@ -33,6 +39,38 @@ struct reg_band {
developer4c6b6002022-05-30 16:36:44 +0800596 #define TM_REG_MAX_ID 20
597 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
598
developerc6f56bb2022-06-14 18:36:30 +0800599+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
600+
developer4c6b6002022-05-30 16:36:44 +0800601+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
602+{
603+ static const u8 width_to_bw[] = {
604+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
605+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
606+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
607+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
608+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
609+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
610+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
611+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
612+ };
613+
614+ if (width >= ARRAY_SIZE(width_to_bw))
615+ return 0;
616+
617+ return width_to_bw[width];
618+}
619+
620+static void
621+mt7915_tm_update_channel(struct mt7915_phy *phy)
622+{
623+ mutex_unlock(&phy->dev->mt76.mutex);
624+ mt7915_set_channel(phy);
625+ mutex_lock(&phy->dev->mt76.mutex);
626+
627+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
developerc6f56bb2022-06-14 18:36:30 +0800628+
629+ mt7915_tm_update_entry(phy);
developer4c6b6002022-05-30 16:36:44 +0800630+}
631
632 static int
633 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
developerc6f56bb2022-06-14 18:36:30 +0800634@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
developer4c6b6002022-05-30 16:36:44 +0800635 }
636
637 static int
638-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
639+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
640 {
641+ struct mt76_testmode_entry_data *ed;
642+ struct mt76_wcid *wcid;
643 struct mt7915_dev *dev = phy->dev;
644 struct mt7915_tm_cmd req = {
645 .testmode_en = 1,
646 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
647- .param.clean.wcid = wcid,
developereb6a0182022-12-12 18:53:32 +0800648 .param.clean.band = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +0800649 };
650
651- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
652- sizeof(req), false);
653+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
654+ int ret;
655+
656+ req.param.clean.wcid = wcid->idx;
657+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
658+ &req, sizeof(req), false);
659+ if (ret)
660+ return ret;
661+ }
662+
663+ return 0;
664 }
665
666 static int
developereb6a0182022-12-12 18:53:32 +0800667@@ -141,7 +189,7 @@ mt7915_tm_set_phy_count(struct mt7915_phy *phy, u8 control)
668 .testmode_en = 1,
669 .param_idx = MCU_ATE_SET_PHY_COUNT,
670 .param.cfg.enable = control,
671- .param.cfg.band = phy != &dev->phy,
672+ .param.cfg.band = phy->mt76->band_idx,
673 };
674
675 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
developerf64861f2022-06-22 11:44:53 +0800676@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
developer4c6b6002022-05-30 16:36:44 +0800677 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
678 }
679
680+static int
681+mt7915_tm_set_cfg(struct mt7915_phy *phy)
682+{
683+ static const u8 cfg_cmd[] = {
684+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
685+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
686+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
687+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
688+ };
689+ struct mt76_testmode_data *td = &phy->mt76->test;
690+ struct mt7915_dev *dev = phy->dev;
691+ struct mt7915_tm_cmd req = {
692+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
693+ .param_idx = cfg_cmd[td->cfg.type],
694+ .param.cfg.enable = td->cfg.enable,
developereb6a0182022-12-12 18:53:32 +0800695+ .param.cfg.band = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +0800696+ };
697+
698+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
699+ sizeof(req), false);
700+}
701+
702+static int
703+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
704+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
705+ u8 nc, bool ebf)
706+{
707+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
708+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
709+ struct mt7915_dev *dev = phy->dev;
710+ struct sk_buff *skb;
711+ struct sta_rec_bf *bf;
712+ struct tlv *tlv;
713+ u8 ndp_rate;
714+
715+ if (nr == 1)
716+ ndp_rate = 8;
717+ else if (nr == 2)
718+ ndp_rate = 16;
719+ else
720+ ndp_rate = 24;
721+
722+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
723+ &msta->wcid);
724+ if (IS_ERR(skb))
725+ return PTR_ERR(skb);
726+
727+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
728+ bf = (struct sta_rec_bf *)tlv;
729+
730+ bf->pfmu = cpu_to_le16(pfmu_idx);
731+ bf->sounding_phy = 1;
732+ bf->bf_cap = ebf;
733+ bf->ncol = nc;
734+ bf->nrow = nr;
735+ bf->ndp_rate = ndp_rate;
736+ bf->ibf_timeout = 0xff;
737+ bf->tx_mode = MT_PHY_TYPE_HT;
738+
739+ if (ebf) {
740+ bf->mem[0].row = 0;
741+ bf->mem[1].row = 1;
742+ bf->mem[2].row = 2;
743+ bf->mem[3].row = 3;
744+ } else {
745+ bf->mem[0].row = 4;
746+ bf->mem[1].row = 5;
747+ bf->mem[2].row = 6;
748+ bf->mem[3].row = 7;
749+ }
750+
751+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
752+ MCU_EXT_CMD(STA_REC_UPDATE), true);
753+}
754+
755+static int
756+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
757+{
758+ struct mt76_testmode_data *td = &phy->mt76->test;
759+ struct mt76_testmode_entry_data *ed;
760+ struct ieee80211_sband_iftype_data *sdata;
761+ struct ieee80211_supported_band *sband;
762+ struct ieee80211_sta *sta;
763+ struct mt7915_sta *msta;
764+ int tid, ret;
765+
766+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
767+ return -EINVAL;
768+
769+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
770+ sizeof(*ed), GFP_KERNEL);
771+ if (!sta)
772+ return -ENOMEM;
773+
774+ msta = (struct mt7915_sta *)sta->drv_priv;
775+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
776+ memcpy(ed, &td->ed, sizeof(*ed));
777+
778+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
779+ sband = &phy->mt76->sband_5g.sband;
780+ sdata = phy->iftype[NL80211_BAND_5GHZ];
781+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
782+ sband = &phy->mt76->sband_6g.sband;
783+ sdata = phy->iftype[NL80211_BAND_6GHZ];
784+ } else {
785+ sband = &phy->mt76->sband_2g.sband;
786+ sdata = phy->iftype[NL80211_BAND_2GHZ];
787+ }
788+
789+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
790+ if (phy->test.bf_en) {
791+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
792+
793+ memcpy(sta->addr, addr, ETH_ALEN);
794+ }
795+
796+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
developereb6a0182022-12-12 18:53:32 +0800797+ memcpy(&sta->deflink.ht_cap, &sband->ht_cap, sizeof(sta->deflink.ht_cap));
developer4c6b6002022-05-30 16:36:44 +0800798+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
developereb6a0182022-12-12 18:53:32 +0800799+ memcpy(&sta->deflink.vht_cap, &sband->vht_cap, sizeof(sta->deflink.vht_cap));
developer4c6b6002022-05-30 16:36:44 +0800800+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
developereb6a0182022-12-12 18:53:32 +0800801+ memcpy(&sta->deflink.he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
802+ sizeof(sta->deflink.he_cap));
developer4c6b6002022-05-30 16:36:44 +0800803+ sta->aid = aid;
804+ sta->wme = 1;
805+
806+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
807+ if (ret) {
808+ kfree(sta);
809+ return ret;
810+ }
811+
812+ /* prevent from starting tx ba session */
813+ for (tid = 0; tid < 8; tid++)
814+ set_bit(tid, &msta->ampdu_state);
815+
816+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
817+ td->entry_num++;
818+
819+ return 0;
820+}
821+
822+static void
823+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
824+{
825+ struct mt76_testmode_data *td = &phy->mt76->test;
826+ struct mt76_wcid *wcid, *tmp;
827+
828+ if (list_empty(&td->tm_entry_list))
829+ return;
830+
831+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
developerc6f56bb2022-06-14 18:36:30 +0800832+ struct mt76_testmode_entry_data *ed;
developer4c6b6002022-05-30 16:36:44 +0800833+ struct mt7915_dev *dev = phy->dev;
developerc6f56bb2022-06-14 18:36:30 +0800834+ struct ieee80211_sta *sta;
developer4c6b6002022-05-30 16:36:44 +0800835+
developerc6f56bb2022-06-14 18:36:30 +0800836+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
837+ if (aid && ed->aid != aid)
838+ continue;
839+
840+ sta = wcid_to_sta(wcid);
developer4c6b6002022-05-30 16:36:44 +0800841+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
842+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
843+
844+ list_del_init(&wcid->list);
845+ kfree(sta);
846+ phy->mt76->test.entry_num--;
847+ }
848+}
849+
850+static int
851+mt7915_tm_set_entry(struct mt7915_phy *phy)
852+{
853+ struct mt76_testmode_data *td = &phy->mt76->test;
854+ struct mt76_testmode_entry_data *ed;
855+ struct mt76_wcid *wcid;
856+
857+ if (!td->aid) {
858+ if (td->state > MT76_TM_STATE_IDLE)
859+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
860+ mt7915_tm_entry_remove(phy, td->aid);
861+ return 0;
862+ }
863+
864+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
865+ if (ed->aid == td->aid) {
866+ struct sk_buff *skb;
867+
868+ local_bh_disable();
869+ skb = ed->tx_skb;
870+ memcpy(ed, &td->ed, sizeof(*ed));
871+ ed->tx_skb = skb;
872+ local_bh_enable();
873+
874+ return 0;
875+ }
876+ }
877+
878+ return mt7915_tm_entry_add(phy, td->aid);
879+}
880+
developerc6f56bb2022-06-14 18:36:30 +0800881+static void
882+mt7915_tm_update_entry(struct mt7915_phy *phy)
883+{
884+ struct mt76_testmode_data *td = &phy->mt76->test;
885+ struct mt76_testmode_entry_data *ed, tmp;
886+ struct mt76_wcid *wcid, *last;
887+
888+ if (!td->aid || phy->test.bf_en)
889+ return;
890+
891+ memcpy(&tmp, &td->ed, sizeof(tmp));
892+ last = list_last_entry(&td->tm_entry_list,
893+ struct mt76_wcid, list);
894+
895+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
896+ memcpy(&td->ed, ed, sizeof(td->ed));
897+ mt7915_tm_entry_remove(phy, td->aid);
898+ mt7915_tm_entry_add(phy, td->aid);
899+ if (wcid == last)
900+ break;
901+ }
902+
903+ memcpy(&td->ed, &tmp, sizeof(td->ed));
904+}
905+
developer4c6b6002022-05-30 16:36:44 +0800906+static int
907+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
908+{
909+ struct mt76_testmode_data *td = &phy->mt76->test;
910+ struct mt7915_dev *dev = phy->dev;
911+ bool enable = val[0];
912+ void *phase_cal, *pfmu_data, *pfmu_tag;
913+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
914+
915+ if (!enable) {
916+ phy->test.bf_en = 0;
917+ return 0;
918+ }
919+
920+ if (!dev->test.txbf_phase_cal) {
921+ phase_cal = devm_kzalloc(dev->mt76.dev,
922+ sizeof(struct mt7915_tm_txbf_phase) *
923+ MAX_PHASE_GROUP_NUM,
924+ GFP_KERNEL);
925+ if (!phase_cal)
926+ return -ENOMEM;
927+
928+ dev->test.txbf_phase_cal = phase_cal;
929+ }
930+
931+ if (!dev->test.txbf_pfmu_data) {
932+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
933+ if (!pfmu_data)
934+ return -ENOMEM;
935+
936+ dev->test.txbf_pfmu_data = pfmu_data;
937+ }
938+
939+ if (!dev->test.txbf_pfmu_tag) {
940+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
941+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
942+ if (!pfmu_tag)
943+ return -ENOMEM;
944+
945+ dev->test.txbf_pfmu_tag = pfmu_tag;
946+ }
947+
948+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
949+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
950+
951+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
952+ td->tx_mpdu_len = 1024;
953+ td->tx_rate_sgi = 0;
954+ td->tx_ipg = 100;
955+ phy->test.bf_en = 1;
956+
957+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
958+}
959+
960+static int
961+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
962+{
963+ struct mt7915_dev *dev = phy->dev;
964+ struct {
965+ u8 category;
966+ u8 wlan_idx_lo;
967+ u8 bw;
968+ u8 jp_band;
969+ u8 dbdc_idx;
970+ bool read_from_e2p;
971+ bool disable;
972+ u8 wlan_idx_hi;
973+ u8 buf[40];
974+ } __packed req = {
975+ .category = MT_BF_IBF_PHASE_COMP,
976+ .bw = val[0],
977+ .jp_band = (val[2] == 1) ? 1 : 0,
developereb6a0182022-12-12 18:53:32 +0800978+ .dbdc_idx = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +0800979+ .read_from_e2p = val[3],
980+ .disable = val[4],
981+ };
982+ struct mt7915_tm_txbf_phase *phase =
983+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
984+
985+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
986+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
987+
988+ pr_info("ibf cal process: phase comp info\n");
989+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
990+ &req, sizeof(req), 0);
991+
992+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
993+ sizeof(req), true);
994+}
995+
996+static int
997+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
998+{
999+ struct mt7915_dev *dev = phy->dev;
1000+ struct {
1001+ u8 format_id;
1002+ u8 pfmu_idx;
1003+ bool bfer;
1004+ u8 dbdc_idx;
1005+ } __packed req = {
1006+ .format_id = MT_BF_PFMU_TAG_READ,
1007+ .pfmu_idx = pfmu_idx,
1008+ .bfer = 1,
1009+ .dbdc_idx = phy != &dev->phy,
1010+ };
1011+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1012+
1013+ tag->t1.pfmu_idx = 0;
1014+
1015+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1016+ sizeof(req), true);
1017+}
1018+
1019+static int
1020+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
1021+ struct mt7915_tm_pfmu_tag *tag)
1022+{
1023+ struct mt7915_dev *dev = phy->dev;
1024+ struct {
1025+ u8 format_id;
1026+ u8 pfmu_idx;
1027+ bool bfer;
1028+ u8 dbdc_idx;
1029+ u8 buf[64];
1030+ } __packed req = {
1031+ .format_id = MT_BF_PFMU_TAG_WRITE,
1032+ .pfmu_idx = pfmu_idx,
1033+ .bfer = 1,
1034+ .dbdc_idx = phy != &dev->phy,
1035+ };
1036+
1037+ memcpy(req.buf, tag, sizeof(*tag));
1038+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1039+
1040+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1041+ sizeof(req), false);
1042+}
1043+
1044+static int
1045+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1046+ bool ibf, bool phase_cal)
1047+{
1048+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1049+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1050+ struct mt7915_dev *dev = phy->dev;
1051+ struct {
1052+ u8 category;
1053+ u8 wlan_idx_lo;
1054+ bool ebf;
1055+ bool ibf;
1056+ bool mu_txbf;
1057+ bool phase_cal;
1058+ u8 wlan_idx_hi;
1059+ u8 _rsv;
1060+ } __packed req = {
1061+ .category = MT_BF_DATA_PACKET_APPLY,
1062+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1063+ .ebf = ebf,
1064+ .ibf = ibf,
1065+ .phase_cal = phase_cal,
1066+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1067+ };
1068+
1069+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1070+ sizeof(req), false);
1071+}
1072+
1073+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1074+ struct mt76_wcid *wcid)
1075+{
1076+ struct mt7915_dev *dev = phy->dev;
1077+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1078+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1079+ struct sta_phy rate = {};
1080+
1081+ if (!sta)
1082+ return 0;
1083+
1084+ rate.type = MT_PHY_TYPE_HT;
1085+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1086+ rate.nss = ed->tx_rate_nss;
1087+ rate.mcs = ed->tx_rate_idx;
1088+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1089+
1090+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1091+ &rate, RATE_PARAM_FIXED);
1092+}
1093+
1094+static int
1095+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1096+{
1097+ bool bf_on = val[0], update = val[3];
1098+ /* u16 wlan_idx = val[2]; */
1099+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1100+ struct mt76_testmode_data *td = &phy->mt76->test;
1101+ struct mt76_wcid *wcid;
1102+
1103+ if (bf_on) {
1104+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1105+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1106+ tag->t1.invalid_prof = false;
1107+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1108+
1109+ phy->test.bf_ever_en = true;
1110+
1111+ if (update)
1112+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1113+ } else {
1114+ if (!phy->test.bf_ever_en) {
1115+ if (update)
1116+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1117+ } else {
1118+ phy->test.bf_ever_en = false;
1119+
1120+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1121+ tag->t1.invalid_prof = true;
1122+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1123+ }
1124+ }
1125+
1126+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1127+ mt7915_tm_txbf_set_rate(phy, wcid);
1128+
1129+ return 0;
1130+}
1131+
1132+static int
1133+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1134+{
1135+ static const u8 mode_to_lm[] = {
1136+ [MT76_TM_TX_MODE_CCK] = 0,
1137+ [MT76_TM_TX_MODE_OFDM] = 0,
1138+ [MT76_TM_TX_MODE_HT] = 1,
1139+ [MT76_TM_TX_MODE_VHT] = 2,
1140+ [MT76_TM_TX_MODE_HE_SU] = 3,
1141+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1142+ [MT76_TM_TX_MODE_HE_TB] = 3,
1143+ [MT76_TM_TX_MODE_HE_MU] = 3,
1144+ };
1145+ struct mt76_testmode_data *td = &phy->mt76->test;
1146+ struct mt76_wcid *wcid;
1147+ struct ieee80211_vif *vif = phy->monitor_vif;
1148+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1149+ u8 pfmu_idx = val[0], nc = val[2], nr;
1150+ int ret;
1151+
1152+ if (td->tx_antenna_mask == 3)
1153+ nr = 1;
1154+ else if (td->tx_antenna_mask == 7)
1155+ nr = 2;
1156+ else
1157+ nr = 3;
1158+
1159+ memset(tag, 0, sizeof(*tag));
1160+ tag->t1.pfmu_idx = pfmu_idx;
1161+ tag->t1.ebf = ebf;
1162+ tag->t1.nr = nr;
1163+ tag->t1.nc = nc;
1164+ tag->t1.invalid_prof = true;
1165+
1166+ tag->t1.snr_sts4 = 0xc0;
1167+ tag->t1.snr_sts5 = 0xff;
1168+ tag->t1.snr_sts6 = 0xff;
1169+ tag->t1.snr_sts7 = 0xff;
1170+
1171+ if (ebf) {
1172+ tag->t1.row_id1 = 0;
1173+ tag->t1.row_id2 = 1;
1174+ tag->t1.row_id3 = 2;
1175+ tag->t1.row_id4 = 3;
1176+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1177+ } else {
1178+ tag->t1.row_id1 = 4;
1179+ tag->t1.row_id2 = 5;
1180+ tag->t1.row_id3 = 6;
1181+ tag->t1.row_id4 = 7;
1182+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1183+
1184+ tag->t2.ibf_timeout = 0xff;
1185+ tag->t2.ibf_nr = nr;
1186+ }
1187+
1188+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1189+ if (ret)
1190+ return ret;
1191+
1192+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1193+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1194+ if (ret)
1195+ return ret;
1196+
1197+ if (!ebf)
1198+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1199+
1200+ return 0;
1201+}
1202+
1203+static int
1204+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1205+{
1206+#define GROUP_L 0
1207+#define GROUP_M 1
1208+#define GROUP_H 2
1209+ struct mt7915_dev *dev = phy->dev;
1210+ struct {
1211+ u8 category;
1212+ u8 group_l_m_n;
1213+ u8 group;
1214+ bool sx2;
1215+ u8 cal_type;
1216+ u8 lna_gain_level;
1217+ u8 _rsv[2];
1218+ } __packed req = {
1219+ .category = MT_BF_PHASE_CAL,
1220+ .group = val[0],
1221+ .group_l_m_n = val[1],
1222+ .sx2 = val[2],
1223+ .cal_type = val[3],
1224+ .lna_gain_level = 0, /* for test purpose */
1225+ };
1226+ struct mt7915_tm_txbf_phase *phase =
1227+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1228+
1229+ phase[req.group].status = 0;
1230+
1231+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1232+ sizeof(req), true);
1233+}
1234+
1235+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1236+{
1237+#define BF_PFMU_TAG 16
1238+#define BF_CAL_PHASE 21
1239+ u8 format_id;
1240+
developerf64861f2022-06-22 11:44:53 +08001241+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
developer4c6b6002022-05-30 16:36:44 +08001242+ format_id = *(u8 *)skb->data;
1243+
1244+ if (format_id == BF_PFMU_TAG) {
1245+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1246+
1247+ skb_pull(skb, 8);
1248+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1249+ } else if (format_id == BF_CAL_PHASE) {
1250+ struct mt7915_tm_ibf_cal_info *cal;
1251+ struct mt7915_tm_txbf_phase *phase =
1252+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1253+
1254+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1255+ switch (cal->cal_type) {
1256+ case IBF_PHASE_CAL_NORMAL:
1257+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1258+ if (cal->group_l_m_n != GROUP_M)
1259+ break;
1260+ phase = &phase[cal->group];
1261+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1262+ phase->status = cal->status;
1263+ break;
1264+ case IBF_PHASE_CAL_VERIFY:
1265+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1266+ break;
1267+ default:
1268+ break;
1269+ }
1270+ }
1271+
1272+ wake_up(&dev->mt76.tx_wait);
1273+
1274+ return 0;
1275+}
1276+
1277+static int
1278+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1279+{
1280+ struct mt76_testmode_data *td = &phy->mt76->test;
1281+ u16 pfmu_idx = val[0];
1282+ u16 subc_id = val[1];
1283+ u16 angle11 = val[2];
1284+ u16 angle21 = val[3];
1285+ u16 angle31 = val[4];
1286+ u16 angle41 = val[5];
1287+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1288+ struct mt7915_tm_pfmu_data *pfmu_data;
1289+
1290+ if (subc_id > 63)
1291+ return -EINVAL;
1292+
1293+ if (td->tx_antenna_mask == 2) {
1294+ phi11 = (s16)(angle21 - angle11);
1295+ } else if (td->tx_antenna_mask == 3) {
1296+ phi11 = (s16)(angle31 - angle11);
1297+ phi21 = (s16)(angle31 - angle21);
1298+ } else {
1299+ phi11 = (s16)(angle41 - angle11);
1300+ phi21 = (s16)(angle41 - angle21);
1301+ phi31 = (s16)(angle41 - angle31);
1302+ }
1303+
1304+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1305+ pfmu_data = &pfmu_data[subc_id];
1306+
1307+ if (subc_id < 32)
1308+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1309+ else
1310+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1311+ pfmu_data->phi11 = cpu_to_le16(phi11);
1312+ pfmu_data->phi21 = cpu_to_le16(phi21);
1313+ pfmu_data->phi31 = cpu_to_le16(phi31);
1314+
1315+ if (subc_id == 63) {
1316+ struct mt7915_dev *dev = phy->dev;
1317+ struct {
1318+ u8 format_id;
1319+ u8 pfmu_idx;
1320+ u8 dbdc_idx;
1321+ u8 _rsv;
1322+ u8 buf[512];
1323+ } __packed req = {
1324+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1325+ .pfmu_idx = pfmu_idx,
1326+ .dbdc_idx = phy != &dev->phy,
1327+ };
1328+
1329+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1330+
1331+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1332+ &req, sizeof(req), true);
1333+ }
1334+
1335+ return 0;
1336+}
1337+
1338+static int
1339+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1340+{
1341+ struct mt7915_tm_txbf_phase *phase, *p;
1342+ struct mt7915_dev *dev = phy->dev;
1343+ u8 *eeprom = dev->mt76.eeprom.data;
1344+ u16 offset;
1345+ bool is_7976;
1346+ int i;
1347+
1348+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1349+ offset = is_7976 ? 0x60a : 0x651;
1350+
1351+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1352+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1353+ p = &phase[i];
1354+
1355+ if (!p->status)
1356+ continue;
1357+
1358+ /* copy phase cal data to eeprom */
1359+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1360+ sizeof(p->phase));
1361+ }
1362+
1363+ return 0;
1364+}
1365+
1366+static int
1367+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1368+{
1369+ struct mt76_testmode_data *td = &phy->mt76->test;
1370+ u16 *val = td->txbf_param;
1371+
1372+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1373+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1374+
1375+ switch (td->txbf_act) {
1376+ case MT76_TM_TXBF_ACT_INIT:
1377+ return mt7915_tm_txbf_init(phy, val);
1378+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1379+ mt7915_tm_update_channel(phy);
1380+ break;
1381+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1382+ return mt7915_tm_txbf_phase_comp(phy, val);
1383+ case MT76_TM_TXBF_ACT_TX_PREP:
1384+ return mt7915_tm_txbf_set_tx(phy, val);
1385+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1386+ return mt7915_tm_txbf_profile_update(phy, val, false);
1387+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1388+ return mt7915_tm_txbf_profile_update(phy, val, true);
1389+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1390+ return mt7915_tm_txbf_phase_cal(phy, val);
1391+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1392+ return mt7915_tm_txbf_profile_update_all(phy, val);
1393+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1394+ return mt7915_tm_txbf_e2p_update(phy);
1395+ default:
1396+ break;
1397+ }
1398+
1399+ return 0;
1400+}
1401+
1402 static int
developerf64861f2022-06-22 11:44:53 +08001403 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer4c6b6002022-05-30 16:36:44 +08001404- u16 cw_max, u16 txop)
1405+ u16 cw_max, u16 txop, u8 tx_cmd)
1406 {
developerf64861f2022-06-22 11:44:53 +08001407 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer4c6b6002022-05-30 16:36:44 +08001408- struct mt7915_mcu_tx req = { .total = 1 };
1409+ struct mt7915_mcu_tx req = {
1410+ .valid = true,
1411+ .mode = tx_cmd,
1412+ .total = 1,
1413+ };
1414 struct edca *e = &req.edca[0];
1415
developerf64861f2022-06-22 11:44:53 +08001416 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
developereb6a0182022-12-12 18:53:32 +08001417@@ -263,7 +1037,8 @@ done:
developer4c6b6002022-05-30 16:36:44 +08001418
developerf64861f2022-06-22 11:44:53 +08001419 return mt7915_tm_set_wmm_qid(phy,
developer4c6b6002022-05-30 16:36:44 +08001420 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1421- aifsn, cw, cw, 0);
1422+ aifsn, cw, cw, 0,
1423+ mode == MT76_TM_TX_MODE_HE_MU);
1424 }
1425
1426 static int
developereb6a0182022-12-12 18:53:32 +08001427@@ -339,7 +1114,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
developer4c6b6002022-05-30 16:36:44 +08001428 bitrate = cfg80211_calculate_bitrate(&rate);
1429 tx_len = bitrate * tx_time / 10 / 8;
1430
1431- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1432+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1433 if (ret)
1434 return ret;
1435
developereb6a0182022-12-12 18:53:32 +08001436@@ -458,64 +1233,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001437
1438 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1439
1440- if (!en)
1441+ if (!en) {
1442 mt7915_tm_set_tam_arb(phy, en, 0);
1443+
1444+ phy->mt76->test.aid = 0;
1445+ phy->mt76->test.tx_mpdu_len = 0;
1446+ phy->test.bf_en = 0;
1447+ mt7915_tm_set_entry(phy);
1448+ }
1449+}
1450+
1451+static bool
1452+mt7915_tm_check_skb(struct mt7915_phy *phy)
1453+{
1454+ struct mt76_testmode_entry_data *ed;
1455+ struct mt76_wcid *wcid;
1456+
1457+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1458+ struct ieee80211_tx_info *info;
1459+
1460+ if (!ed->tx_skb)
1461+ return false;
1462+
1463+ info = IEEE80211_SKB_CB(ed->tx_skb);
1464+ info->control.vif = phy->monitor_vif;
1465+ }
1466+
1467+ return true;
1468+}
1469+
1470+static int
1471+mt7915_tm_set_ba(struct mt7915_phy *phy)
1472+{
1473+ struct mt7915_dev *dev = phy->dev;
1474+ struct mt76_testmode_data *td = &phy->mt76->test;
1475+ struct mt76_wcid *wcid;
1476+ struct ieee80211_vif *vif = phy->monitor_vif;
1477+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1478+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1479+
1480+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1481+ int tid, ret;
1482+
1483+ params.sta = wcid_to_sta(wcid);
1484+ for (tid = 0; tid < 8; tid++) {
1485+ params.tid = tid;
1486+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1487+ if (ret)
1488+ return ret;
1489+ }
1490+ }
1491+
1492+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1493+ 0x01010101);
1494+
1495+ return 0;
1496+}
1497+
1498+static int
1499+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1500+{
1501+/* #define MURU_SET_MANUAL_CFG 100 */
1502+ struct mt7915_dev *dev = phy->dev;
1503+ struct {
1504+ __le32 cmd;
1505+ struct mt7915_tm_muru muru;
1506+ } __packed req = {
1507+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1508+ };
1509+
1510+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1511+
1512+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1513+ sizeof(req), false);
1514+}
1515+
1516+static int
1517+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1518+{
1519+ struct mt76_testmode_data *td = &phy->mt76->test;
1520+ struct mt76_testmode_entry_data *ed;
1521+ struct mt76_wcid *wcid;
1522+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1523+ struct ieee80211_vif *vif = phy->monitor_vif;
1524+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1525+ struct mt7915_tm_muru muru = {};
1526+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1527+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1528+ int i;
1529+
1530+ comm->ppdu_format = MURU_PPDU_HE_MU;
1531+ comm->band = mvif->mt76.band_idx;
1532+ comm->wmm_idx = mvif->mt76.wmm_idx;
1533+ comm->spe_idx = phy->test.spe_idx;
1534+
1535+ dl->bw = mt7915_tm_chan_bw(chandef->width);
1536+ dl->gi = td->tx_rate_sgi;
1537+ dl->ltf = td->tx_ltf;
1538+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1539+
1540+ for (i = 0; i < sizeof(dl->ru); i++)
1541+ dl->ru[i] = 0x71;
1542+
1543+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1544+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1545+
1546+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1547+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1548+ dl_usr->ru_idx = ed->ru_idx;
1549+ dl_usr->mcs = ed->tx_rate_idx;
1550+ dl_usr->nss = ed->tx_rate_nss - 1;
1551+ dl_usr->ldpc = ed->tx_rate_ldpc;
1552+ dl->ru[dl->user_num] = ed->ru_alloc;
1553+
1554+ dl->user_num++;
1555+ }
1556+
1557+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1558+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1559+
1560+ return mt7915_tm_set_muru_cfg(phy, &muru);
1561+}
1562+
1563+static int
1564+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1565+{
1566+#define MURU_SET_TX_PKT_CNT 105
1567+#define MURU_SET_TX_EN 106
1568+ struct mt7915_dev *dev = phy->dev;
1569+ struct {
1570+ __le32 cmd;
1571+ u8 band;
1572+ u8 enable;
1573+ u8 _rsv[2];
1574+ __le32 tx_count;
1575+ } __packed req = {
developereb6a0182022-12-12 18:53:32 +08001576+ .band = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +08001577+ .enable = enable,
1578+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1579+ };
1580+ int ret;
1581+
1582+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1583+ cpu_to_le32(MURU_SET_TX_EN);
1584+
1585+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1586+ sizeof(req), false);
1587+ if (ret)
1588+ return ret;
1589+
1590+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1591+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1592+
1593+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1594+ sizeof(req), false);
1595 }
1596
1597 static void
1598-mt7915_tm_update_channel(struct mt7915_phy *phy)
1599+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1600 {
1601- mutex_unlock(&phy->dev->mt76.mutex);
1602- mt7915_set_channel(phy);
1603- mutex_lock(&phy->dev->mt76.mutex);
1604+ struct mt76_testmode_data *td = &phy->mt76->test;
1605
1606- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1607+ if (enable) {
1608+ struct mt7915_dev *dev = phy->dev;
1609+
1610+ mt7915_tm_set_ba(phy);
1611+ mt7915_tm_set_muru_dl(phy);
1612+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1613+ } else {
1614+ /* set to zero for counting real tx free num */
1615+ td->tx_done = 0;
1616+ }
1617+
1618+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1619+ usleep_range(100000, 200000);
1620 }
1621
1622 static void
developerd59e4772022-07-14 13:48:49 +08001623 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1624 {
developer4c6b6002022-05-30 16:36:44 +08001625 struct mt76_testmode_data *td = &phy->mt76->test;
1626- struct mt7915_dev *dev = phy->dev;
1627- struct ieee80211_tx_info *info;
1628- u8 duty_cycle = td->tx_duty_cycle;
1629- u32 tx_time = td->tx_time;
1630- u32 ipg = td->tx_ipg;
1631
1632 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1633- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1634+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1635
1636 if (en) {
1637- mt7915_tm_update_channel(phy);
1638+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1639+ u8 duty_cycle = td->tx_duty_cycle;
1640+
1641+ if (!phy->test.bf_en)
1642+ mt7915_tm_update_channel(phy);
1643
developerd59e4772022-07-14 13:48:49 +08001644 if (td->tx_spe_idx)
developer4c6b6002022-05-30 16:36:44 +08001645 phy->test.spe_idx = td->tx_spe_idx;
developerd59e4772022-07-14 13:48:49 +08001646 else
1647 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
developer4c6b6002022-05-30 16:36:44 +08001648- }
1649
1650- mt7915_tm_set_tam_arb(phy, en,
1651- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1652+ /* if all three params are set, duty_cycle will be ignored */
1653+ if (duty_cycle && tx_time && !ipg) {
1654+ ipg = tx_time * 100 / duty_cycle - tx_time;
1655+ } else if (duty_cycle && !tx_time && ipg) {
1656+ if (duty_cycle < 100)
1657+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1658+ }
1659
1660- /* if all three params are set, duty_cycle will be ignored */
1661- if (duty_cycle && tx_time && !ipg) {
1662- ipg = tx_time * 100 / duty_cycle - tx_time;
1663- } else if (duty_cycle && !tx_time && ipg) {
1664- if (duty_cycle < 100)
1665- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1666- }
1667+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1668+ mt7915_tm_set_tx_len(phy, tx_time);
1669
1670- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1671- mt7915_tm_set_tx_len(phy, tx_time);
1672+ if (ipg)
1673+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1674
1675- if (ipg)
1676- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1677+ if (!mt7915_tm_check_skb(phy))
1678+ return;
1679+ } else {
1680+ mt7915_tm_clean_hwq(phy);
1681+ }
1682
1683- if (!en || !td->tx_skb)
1684- return;
1685+ mt7915_tm_set_tam_arb(phy, en,
1686+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1687
1688- info = IEEE80211_SKB_CB(td->tx_skb);
1689- info->control.vif = phy->monitor_vif;
1690+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1691+ mt7915_tm_tx_frames_mu(phy, en);
1692
1693 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1694 }
developereb6a0182022-12-12 18:53:32 +08001695@@ -544,10 +1482,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001696 return ret;
1697
1698 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1699- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1700- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1701- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1702- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1703
1704 if (!clear) {
developer1d9fede2022-08-29 15:24:07 +08001705 enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
developereb6a0182022-12-12 18:53:32 +08001706@@ -562,13 +1496,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001707 return 0;
1708 }
1709
1710+static int
1711+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1712+{
1713+ struct mt7915_dev *dev = phy->dev;
1714+ struct mt76_wcid *wcid = NULL;
1715+ struct mt76_testmode_entry_data *ed;
1716+ struct {
1717+ u8 band;
1718+ u8 _rsv;
1719+ __le16 wlan_idx;
1720+ } __packed req = {
developereb6a0182022-12-12 18:53:32 +08001721+ .band = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +08001722+ };
1723+
1724+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1725+ if (ed->aid == aid)
1726+ break;
1727+
1728+ if (!wcid)
1729+ return -EINVAL;
1730+
1731+ req.wlan_idx = cpu_to_le16(wcid->idx);
1732+
1733+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1734+ &req, sizeof(req), false);
1735+}
1736+
1737+static int
1738+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1739+{
1740+ struct mt7915_dev *dev = phy->dev;
1741+ struct mt7915_tm_cmd req = {
1742+ .testmode_en = 1,
1743+ .param_idx = MCU_ATE_SET_MU_RX_AID,
developereb6a0182022-12-12 18:53:32 +08001744+ .param.rx_aid.band = cpu_to_le32(phy->mt76->band_idx),
developer4c6b6002022-05-30 16:36:44 +08001745+ .param.rx_aid.aid = cpu_to_le16(aid),
1746+ };
1747+
1748+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1749+ sizeof(req), false);
1750+}
1751+
1752 static void
1753 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1754 {
1755+ struct mt76_testmode_data *td = &phy->mt76->test;
1756+
1757+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1758 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1759
1760 if (en) {
1761- mt7915_tm_update_channel(phy);
1762+ if (!phy->test.bf_en)
1763+ mt7915_tm_update_channel(phy);
1764+ if (td->aid)
1765+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1766
1767 /* read-clear */
1768 mt7915_tm_get_rx_stats(phy, true);
developereb6a0182022-12-12 18:53:32 +08001769@@ -576,9 +1558,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001770 /* clear fw count */
1771 mt7915_tm_set_phy_count(phy, 0);
1772 mt7915_tm_set_phy_count(phy, 1);
1773-
1774- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1775 }
1776+
1777+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1778+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1779+
1780+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1781 }
1782
1783 static int
developereb6a0182022-12-12 18:53:32 +08001784@@ -617,34 +1602,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001785 tx_cont->tx_ant = td->tx_antenna_mask;
developereb6a0182022-12-12 18:53:32 +08001786 tx_cont->band = band;
developer144824b2022-11-25 21:27:43 +08001787
developer4c6b6002022-05-30 16:36:44 +08001788- switch (chandef->width) {
1789- case NL80211_CHAN_WIDTH_40:
1790- tx_cont->bw = CMD_CBW_40MHZ;
1791- break;
1792- case NL80211_CHAN_WIDTH_80:
1793- tx_cont->bw = CMD_CBW_80MHZ;
1794- break;
1795- case NL80211_CHAN_WIDTH_80P80:
1796- tx_cont->bw = CMD_CBW_8080MHZ;
1797- break;
1798- case NL80211_CHAN_WIDTH_160:
1799- tx_cont->bw = CMD_CBW_160MHZ;
1800- break;
1801- case NL80211_CHAN_WIDTH_5:
1802- tx_cont->bw = CMD_CBW_5MHZ;
1803- break;
1804- case NL80211_CHAN_WIDTH_10:
1805- tx_cont->bw = CMD_CBW_10MHZ;
1806- break;
1807- case NL80211_CHAN_WIDTH_20:
1808- tx_cont->bw = CMD_CBW_20MHZ;
1809- break;
1810- case NL80211_CHAN_WIDTH_20_NOHT:
1811- tx_cont->bw = CMD_CBW_20MHZ;
1812- break;
1813- default:
1814- return -EINVAL;
1815- }
1816+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1817
1818 if (!en) {
developereb6a0182022-12-12 18:53:32 +08001819 req.op.rf.param.func_data = cpu_to_le32(band);
1820@@ -728,6 +1686,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer4c6b6002022-05-30 16:36:44 +08001821 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1822 if (changed & BIT(TM_CHANGED_TXPOWER))
1823 mt7915_tm_set_tx_power(phy);
1824+ if (changed & BIT(TM_CHANGED_AID))
1825+ mt7915_tm_set_entry(phy);
1826+ if (changed & BIT(TM_CHANGED_CFG))
1827+ mt7915_tm_set_cfg(phy);
1828+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1829+ mt7915_tm_set_txbf(phy);
1830 }
1831
1832 static int
developer287ee9f2023-03-02 20:13:34 +08001833@@ -737,6 +1701,11 @@ mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
1834 struct mt7915_phy *phy = mphy->priv;
1835 enum mt76_testmode_state prev_state = td->state;
1836
1837+ if (!phy->monitor_vif) {
1838+ dev_err(phy->dev->mt76.dev, "Please make sure monitor interface is up\n");
1839+ return -ENOTCONN;
1840+ }
1841+
1842 mphy->test.state = state;
1843
1844 if (prev_state == MT76_TM_STATE_TX_FRAMES ||
1845@@ -807,6 +1776,7 @@ static int
developer4c6b6002022-05-30 16:36:44 +08001846 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1847 {
1848 struct mt7915_phy *phy = mphy->priv;
1849+ struct mt7915_dev *dev = phy->dev;
1850 void *rx, *rssi;
1851 int i;
1852
developer287ee9f2023-03-02 20:13:34 +08001853@@ -852,11 +1822,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer4c6b6002022-05-30 16:36:44 +08001854
1855 nla_nest_end(msg, rx);
1856
1857+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1858+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1859+
1860 return mt7915_tm_get_rx_stats(phy, false);
1861 }
1862
1863+static int
1864+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1865+{
1866+ struct mt7915_mcu_eeprom_info req = {};
1867+ u8 *eeprom = dev->mt76.eeprom.data;
1868+ int i, ret = -EINVAL;
1869+
1870+	/* avoid damaging the chip id stored in efuse */
1871+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1872+ goto out;
1873+
1874+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1875+ req.addr = cpu_to_le32(i);
1876+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1877+
1878+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1879+ &req, sizeof(req), true);
1880+ if (ret)
1881+ return ret;
1882+ }
1883+
1884+out:
1885+ return ret;
1886+}
1887+
1888+static int
1889+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1890+{
1891+ struct mt7915_phy *phy = mphy->priv;
1892+ struct mt7915_dev *dev = phy->dev;
1893+ u8 *eeprom = dev->mt76.eeprom.data;
1894+ int ret = 0;
1895+
1896+ if (offset >= mt7915_eeprom_size(dev))
1897+ return -EINVAL;
1898+
1899+ switch (action) {
1900+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1901+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1902+ break;
1903+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1904+ ret = mt7915_mcu_set_eeprom(dev, true);
1905+ break;
1906+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1907+ ret = mt7915_tm_write_back_to_efuse(dev);
1908+ break;
1909+ default:
1910+ break;
1911+ }
1912+
1913+ return ret;
1914+}
1915+
1916 const struct mt76_testmode_ops mt7915_testmode_ops = {
1917 .set_state = mt7915_tm_set_state,
1918 .set_params = mt7915_tm_set_params,
1919 .dump_stats = mt7915_tm_dump_stats,
1920+ .set_eeprom = mt7915_tm_set_eeprom,
1921 };
1922diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developer287ee9f2023-03-02 20:13:34 +08001923index a1c54c8..01b08e9 100644
developer4c6b6002022-05-30 16:36:44 +08001924--- a/mt7915/testmode.h
1925+++ b/mt7915/testmode.h
1926@@ -4,6 +4,8 @@
1927 #ifndef __MT7915_TESTMODE_H
1928 #define __MT7915_TESTMODE_H
1929
1930+#include "mcu.h"
1931+
1932 struct mt7915_tm_trx {
1933 u8 type;
1934 u8 enable;
1935@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1936 u8 _rsv[2];
1937 };
1938
1939+struct mt7915_tm_mu_rx_aid {
1940+ __le32 band;
1941+ __le16 aid;
1942+};
1943+
1944 struct mt7915_tm_cmd {
1945 u8 testmode_en;
1946 u8 param_idx;
1947@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1948 struct mt7915_tm_slot_time slot;
1949 struct mt7915_tm_clean_txq clean;
1950 struct mt7915_tm_cfg cfg;
1951+ struct mt7915_tm_mu_rx_aid rx_aid;
1952 u8 test[72];
1953 } param;
1954 } __packed;
1955@@ -109,6 +117,16 @@ enum {
1956 TAM_ARB_OP_MODE_FORCE_SU = 5,
1957 };
1958
1959+enum {
1960+ TM_CBW_20MHZ,
1961+ TM_CBW_40MHZ,
1962+ TM_CBW_80MHZ,
1963+ TM_CBW_10MHZ,
1964+ TM_CBW_5MHZ,
1965+ TM_CBW_160MHZ,
1966+ TM_CBW_8080MHZ,
1967+};
1968+
1969 struct mt7915_tm_rx_stat_band {
1970 u8 category;
1971
1972@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1973 __le16 mdrdy_cnt_ofdm;
1974 };
1975
1976+struct mt7915_tm_muru_comm {
1977+ u8 ppdu_format;
1978+ u8 sch_type;
1979+ u8 band;
1980+ u8 wmm_idx;
1981+ u8 spe_idx;
1982+ u8 proc_type;
1983+};
1984+
1985+struct mt7915_tm_muru_dl_usr {
1986+ __le16 wlan_idx;
1987+ u8 ru_alloc_seg;
1988+ u8 ru_idx;
1989+ u8 ldpc;
1990+ u8 nss;
1991+ u8 mcs;
1992+ u8 mu_group_idx;
1993+ u8 vht_groud_id;
1994+ u8 vht_up;
1995+ u8 he_start_stream;
1996+ u8 he_mu_spatial;
1997+ u8 ack_policy;
1998+ __le16 tx_power_alpha;
1999+};
2000+
2001+struct mt7915_tm_muru_dl {
2002+ u8 user_num;
2003+ u8 tx_mode;
2004+ u8 bw;
2005+ u8 gi;
2006+ u8 ltf;
2007+ /* sigB */
2008+ u8 mcs;
2009+ u8 dcm;
2010+ u8 cmprs;
2011+
2012+ u8 tx_power;
2013+ u8 ru[8];
2014+ u8 c26[2];
2015+ u8 ack_policy;
2016+
2017+ struct mt7915_tm_muru_dl_usr usr[16];
2018+};
2019+
2020+struct mt7915_tm_muru_ul_usr {
2021+ __le16 wlan_idx;
2022+ u8 ru_alloc;
2023+ u8 ru_idx;
2024+ u8 ldpc;
2025+ u8 nss;
2026+ u8 mcs;
2027+ u8 target_rssi;
2028+ __le32 trig_pkt_size;
2029+};
2030+
2031+struct mt7915_tm_muru_ul {
2032+ u8 user_num;
2033+
2034+ /* UL TX */
2035+ u8 trig_type;
2036+ __le16 trig_cnt;
2037+ __le16 trig_intv;
2038+ u8 bw;
2039+ u8 gi_ltf;
2040+ __le16 ul_len;
2041+ u8 pad;
2042+ u8 trig_ta[ETH_ALEN];
2043+ u8 ru[8];
2044+ u8 c26[2];
2045+
2046+ struct mt7915_tm_muru_ul_usr usr[16];
2047+ /* HE TB RX Debug */
2048+ __le32 rx_hetb_nonsf_en_bitmap;
2049+ __le32 rx_hetb_cfg[2];
2050+
2051+ /* DL TX */
2052+ u8 ba_type;
2053+};
2054+
2055+struct mt7915_tm_muru {
2056+ __le32 cfg_comm;
2057+ __le32 cfg_dl;
2058+ __le32 cfg_ul;
2059+
2060+ struct mt7915_tm_muru_comm comm;
2061+ struct mt7915_tm_muru_dl dl;
2062+ struct mt7915_tm_muru_ul ul;
2063+};
2064+
2065+#define MURU_PPDU_HE_MU BIT(3)
2066+
2067+/* Common Config */
2068+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2069+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2070+/* #define MURU_COMM_BAND BIT(2) */
2071+/* #define MURU_COMM_WMM BIT(3) */
2072+/* #define MURU_COMM_SPE_IDX BIT(4) */
2073+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2074+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2075+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2076+/* DL Config */
2077+#define MURU_DL_BW BIT(0)
2078+#define MURU_DL_GI BIT(1)
2079+#define MURU_DL_TX_MODE BIT(2)
2080+#define MURU_DL_TONE_PLAN BIT(3)
2081+#define MURU_DL_USER_CNT BIT(4)
2082+#define MURU_DL_LTF BIT(5)
2083+#define MURU_DL_SIGB_MCS BIT(6)
2084+#define MURU_DL_SIGB_DCM BIT(7)
2085+#define MURU_DL_SIGB_CMPRS BIT(8)
2086+#define MURU_DL_ACK_POLICY BIT(9)
2087+#define MURU_DL_TXPOWER BIT(10)
2088+/* DL Per User Config */
2089+#define MURU_DL_USER_WLAN_ID BIT(16)
2090+#define MURU_DL_USER_COD BIT(17)
2091+#define MURU_DL_USER_MCS BIT(18)
2092+#define MURU_DL_USER_NSS BIT(19)
2093+#define MURU_DL_USER_RU_ALLOC BIT(20)
2094+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2095+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2096+#define MURU_DL_USER_ACK_POLICY BIT(23)
2097+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2098+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2099+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2100+
2101+#define MAX_PHASE_GROUP_NUM 9
2102+
2103+struct mt7915_tm_txbf_phase {
2104+ u8 status;
2105+ struct {
2106+ u8 r0_uh;
2107+ u8 r0_h;
2108+ u8 r0_m;
2109+ u8 r0_l;
2110+ u8 r0_ul;
2111+ u8 r1_uh;
2112+ u8 r1_h;
2113+ u8 r1_m;
2114+ u8 r1_l;
2115+ u8 r1_ul;
2116+ u8 r2_uh;
2117+ u8 r2_h;
2118+ u8 r2_m;
2119+ u8 r2_l;
2120+ u8 r2_ul;
2121+ u8 r3_uh;
2122+ u8 r3_h;
2123+ u8 r3_m;
2124+ u8 r3_l;
2125+ u8 r3_ul;
2126+ u8 r2_uh_sx2;
2127+ u8 r2_h_sx2;
2128+ u8 r2_m_sx2;
2129+ u8 r2_l_sx2;
2130+ u8 r2_ul_sx2;
2131+ u8 r3_uh_sx2;
2132+ u8 r3_h_sx2;
2133+ u8 r3_m_sx2;
2134+ u8 r3_l_sx2;
2135+ u8 r3_ul_sx2;
2136+ u8 m_t0_h;
2137+ u8 m_t1_h;
2138+ u8 m_t2_h;
2139+ u8 m_t2_h_sx2;
2140+ u8 r0_reserved;
2141+ u8 r1_reserved;
2142+ u8 r2_reserved;
2143+ u8 r3_reserved;
2144+ u8 r2_sx2_reserved;
2145+ u8 r3_sx2_reserved;
2146+ } phase;
2147+};
2148+
2149+struct mt7915_tm_pfmu_tag1 {
2150+ __le32 pfmu_idx:10;
2151+ __le32 ebf:1;
2152+ __le32 data_bw:2;
2153+ __le32 lm:2;
2154+ __le32 is_mu:1;
2155+ __le32 nr:3, nc:3;
2156+ __le32 codebook:2;
2157+ __le32 ngroup:2;
2158+ __le32 _rsv:2;
2159+ __le32 invalid_prof:1;
2160+ __le32 rmsd:3;
2161+
2162+ __le32 col_id1:6, row_id1:10;
2163+ __le32 col_id2:6, row_id2:10;
2164+ __le32 col_id3:6, row_id3:10;
2165+ __le32 col_id4:6, row_id4:10;
2166+
2167+ __le32 ru_start_id:7;
2168+ __le32 _rsv1:1;
2169+ __le32 ru_end_id:7;
2170+ __le32 _rsv2:1;
2171+ __le32 mob_cal_en:1;
2172+ __le32 _rsv3:15;
2173+
2174+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2175+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2176+
2177+ __le32 _rsv4;
2178+} __packed;
2179+
2180+struct mt7915_tm_pfmu_tag2 {
2181+ __le32 smart_ant:24;
2182+ __le32 se_idx:5;
2183+ __le32 _rsv:3;
2184+
2185+ __le32 _rsv1:8;
2186+ __le32 rmsd_thres:3;
2187+ __le32 _rsv2:5;
2188+ __le32 ibf_timeout:8;
2189+ __le32 _rsv3:8;
2190+
2191+ __le32 _rsv4:16;
2192+ __le32 ibf_data_bw:2;
2193+ __le32 ibf_nc:3;
2194+ __le32 ibf_nr:3;
2195+ __le32 ibf_ru:8;
2196+
2197+ __le32 mob_delta_t:8;
2198+ __le32 mob_lq_result:7;
2199+ __le32 _rsv5:1;
2200+ __le32 _rsv6:16;
2201+
2202+ __le32 _rsv7;
2203+} __packed;
2204+
2205+struct mt7915_tm_pfmu_tag {
2206+ struct mt7915_tm_pfmu_tag1 t1;
2207+ struct mt7915_tm_pfmu_tag2 t2;
2208+};
2209+
2210+struct mt7915_tm_pfmu_data {
2211+ __le16 subc_idx;
2212+ __le16 phi11;
2213+ __le16 phi21;
2214+ __le16 phi31;
2215+};
2216+
2217+struct mt7915_tm_ibf_cal_info {
2218+ u8 format_id;
2219+ u8 group_l_m_n;
2220+ u8 group;
2221+ bool sx2;
2222+ u8 status;
2223+ u8 cal_type;
2224+ u8 _rsv[2];
2225+ u8 buf[1000];
2226+} __packed;
2227+
2228+enum {
2229+ IBF_PHASE_CAL_UNSPEC,
2230+ IBF_PHASE_CAL_NORMAL,
2231+ IBF_PHASE_CAL_VERIFY,
2232+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2233+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2234+};
2235+
2236 #endif
2237diff --git a/testmode.c b/testmode.c
developer62713c82023-03-20 10:46:08 +08002238index 1d0d5d3..fd3b9b2 100644
developer4c6b6002022-05-30 16:36:44 +08002239--- a/testmode.c
2240+++ b/testmode.c
developere9954402022-07-12 10:15:11 -07002241@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer4c6b6002022-05-30 16:36:44 +08002242 };
2243 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2244
2245-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2246+static void
2247+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf1b69ea2022-07-04 10:54:39 +08002248+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2249+ u16 limit)
developer4c6b6002022-05-30 16:36:44 +08002250 {
2251 struct mt76_testmode_data *td = &phy->test;
2252 struct mt76_dev *dev = phy->dev;
2253- struct mt76_wcid *wcid = &dev->global_wcid;
2254- struct sk_buff *skb = td->tx_skb;
2255- struct mt76_queue *q;
2256- u16 tx_queued_limit;
2257- int qid;
2258-
2259- if (!skb || !td->tx_pending)
2260- return;
2261+ u16 count = limit;
2262
2263- qid = skb_get_queue_mapping(skb);
2264- q = phy->q_tx[qid];
2265-
2266- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2267-
2268- spin_lock_bh(&q->lock);
2269-
2270- while (td->tx_pending > 0 &&
2271- td->tx_queued - td->tx_done < tx_queued_limit &&
2272+ while (td->tx_pending > 0 && count &&
2273 q->queued < q->ndesc / 2) {
2274 int ret;
2275
developer62713c82023-03-20 10:46:08 +08002276@@ -57,13 +45,68 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002277 if (ret < 0)
2278 break;
2279
developer62713c82023-03-20 10:46:08 +08002280- td->tx_pending--;
developer4c6b6002022-05-30 16:36:44 +08002281+ count--;
developer62713c82023-03-20 10:46:08 +08002282+
2283+ /* tx_count == UINT_MAX for continuous tx */
2284+ if (td->tx_count != UINT_MAX)
2285+ td->tx_pending--;
developer4c6b6002022-05-30 16:36:44 +08002286 td->tx_queued++;
2287+
2288+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2289+ if (td->tx_queued - td->tx_done >= limit)
2290+ break;
2291 }
2292
2293 dev->queue_ops->kick(dev, q);
2294+}
2295+
2296+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2297+{
2298+ struct mt76_testmode_data *td = &phy->test;
2299+ struct mt76_testmode_entry_data *ed;
2300+ struct mt76_queue *q;
2301+ int qid;
2302+ u16 tx_queued_limit;
2303+ u32 remain;
2304+ bool is_mu;
2305+
2306+ if (!td->tx_pending)
2307+ return;
2308+
2309+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2310+ tx_queued_limit = 100;
2311+
2312+ if (!td->aid) {
2313+ qid = skb_get_queue_mapping(td->tx_skb);
2314+ q = phy->q_tx[qid];
2315+ spin_lock_bh(&q->lock);
2316+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developerf1b69ea2022-07-04 10:54:39 +08002317+ td->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002318+ spin_unlock_bh(&q->lock);
2319+
2320+ return;
2321+ }
2322+
2323+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2324+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2325+ qid = skb_get_queue_mapping(ed->tx_skb);
2326+ q = phy->q_tx[qid];
2327+
2328+ spin_lock_bh(&q->lock);
2329+
2330+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2331+ if (remain < tx_queued_limit)
2332+ tx_queued_limit = remain;
2333+
developerf1b69ea2022-07-04 10:54:39 +08002334+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002335+
2336+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2337+ td->cur_entry = list_next_entry(td->cur_entry, list);
2338
2339 spin_unlock_bh(&q->lock);
2340+
2341+ if (is_mu && td->tx_pending)
2342+ mt76_worker_schedule(&phy->dev->tx_worker);
2343 }
2344
2345 static u32
developer62713c82023-03-20 10:46:08 +08002346@@ -89,15 +132,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer4c6b6002022-05-30 16:36:44 +08002347 }
2348
2349 static void
2350-mt76_testmode_free_skb(struct mt76_phy *phy)
2351+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2352+{
2353+ if (!(*tx_skb))
2354+ return;
2355+
2356+ dev_kfree_skb(*tx_skb);
2357+ *tx_skb = NULL;
2358+}
2359+
2360+static void
2361+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2362 {
2363 struct mt76_testmode_data *td = &phy->test;
2364+ struct mt76_testmode_entry_data *ed = &td->ed;
2365+ struct mt76_wcid *wcid;
2366+
2367+ mt76_testmode_free_skb(&ed->tx_skb);
2368
2369- dev_kfree_skb(td->tx_skb);
2370- td->tx_skb = NULL;
2371+ mt76_tm_for_each_entry(phy, wcid, ed)
2372+ mt76_testmode_free_skb(&ed->tx_skb);
2373 }
2374
2375-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2376+static int
2377+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2378+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2379 {
2380 #define MT_TXP_MAX_LEN 4095
2381 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developer62713c82023-03-20 10:46:08 +08002382@@ -118,7 +177,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002383 nfrags = len / MT_TXP_MAX_LEN;
2384 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2385
2386- if (len > IEEE80211_MAX_FRAME_LEN)
2387+ if (len > IEEE80211_MAX_FRAME_LEN ||
2388+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2389 fc |= IEEE80211_STYPE_QOS_DATA;
2390
2391 head = alloc_skb(head_len, GFP_KERNEL);
developer62713c82023-03-20 10:46:08 +08002392@@ -127,9 +187,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002393
developere9954402022-07-12 10:15:11 -07002394 hdr = __skb_put_zero(head, sizeof(*hdr));
developer4c6b6002022-05-30 16:36:44 +08002395 hdr->frame_control = cpu_to_le16(fc);
2396- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2397- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2398- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2399+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2400+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2401+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2402 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developere9954402022-07-12 10:15:11 -07002403 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2404 head_len - sizeof(*hdr));
developer62713c82023-03-20 10:46:08 +08002405@@ -153,7 +213,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002406
2407 frag = alloc_skb(frag_len, GFP_KERNEL);
2408 if (!frag) {
2409- mt76_testmode_free_skb(phy);
2410+ mt76_testmode_free_skb(tx_skb);
2411 dev_kfree_skb(head);
2412 return -ENOMEM;
2413 }
developer62713c82023-03-20 10:46:08 +08002414@@ -166,15 +226,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002415 frag_tail = &(*frag_tail)->next;
2416 }
2417
2418- mt76_testmode_free_skb(phy);
2419- td->tx_skb = head;
2420+ mt76_testmode_free_skb(tx_skb);
2421+ *tx_skb = head;
2422
2423 return 0;
2424 }
2425-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2426
2427-static int
2428-mt76_testmode_tx_init(struct mt76_phy *phy)
2429+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2430+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2431 {
2432 struct mt76_testmode_data *td = &phy->test;
2433 struct ieee80211_tx_info *info;
developer62713c82023-03-20 10:46:08 +08002434@@ -182,7 +241,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002435 u8 max_nss = hweight8(phy->antenna_mask);
2436 int ret;
2437
2438- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2439+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2440 if (ret)
2441 return ret;
2442
developer62713c82023-03-20 10:46:08 +08002443@@ -192,7 +251,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002444 if (td->tx_antenna_mask)
2445 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2446
2447- info = IEEE80211_SKB_CB(td->tx_skb);
2448+ info = IEEE80211_SKB_CB(*tx_skb);
2449 rate = &info->control.rates[0];
2450 rate->count = 1;
2451 rate->idx = td->tx_rate_idx;
developer62713c82023-03-20 10:46:08 +08002452@@ -264,6 +323,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002453 out:
2454 return 0;
2455 }
2456+EXPORT_SYMBOL(mt76_testmode_init_skb);
2457+
2458+static int
2459+mt76_testmode_tx_init(struct mt76_phy *phy)
2460+{
2461+ struct mt76_testmode_entry_data *ed;
2462+ struct mt76_wcid *wcid;
2463+
2464+ mt76_tm_for_each_entry(phy, wcid, ed) {
2465+ int ret;
2466+
2467+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2468+ &ed->tx_skb, ed->addr);
2469+ if (ret)
2470+ return ret;
2471+ }
2472+
2473+ return 0;
2474+}
2475
2476 static void
2477 mt76_testmode_tx_start(struct mt76_phy *phy)
developer62713c82023-03-20 10:46:08 +08002478@@ -274,6 +352,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002479 td->tx_queued = 0;
2480 td->tx_done = 0;
2481 td->tx_pending = td->tx_count;
2482+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2483+ td->tx_pending = 1;
2484+ if (td->entry_num) {
2485+ td->tx_pending *= td->entry_num;
2486+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2487+ struct mt76_wcid, list);
2488+ }
2489+
2490 mt76_worker_schedule(&dev->tx_worker);
2491 }
2492
developer62713c82023-03-20 10:46:08 +08002493@@ -292,7 +378,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002494 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2495 MT76_TM_TIMEOUT * HZ);
2496
2497- mt76_testmode_free_skb(phy);
2498+ mt76_testmode_free_skb_all(phy);
2499 }
2500
2501 static inline void
developer62713c82023-03-20 10:46:08 +08002502@@ -323,6 +409,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002503 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2504 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2505 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2506+
2507+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2508 }
2509
2510 static int
developer62713c82023-03-20 10:46:08 +08002511@@ -332,8 +420,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer4c6b6002022-05-30 16:36:44 +08002512 struct mt76_dev *dev = phy->dev;
2513 int err;
2514
2515- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2516+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2517+ /* MU needs to clean hwq for free done event */
2518+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2519+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2520 mt76_testmode_tx_stop(phy);
2521+ }
2522
2523 if (state == MT76_TM_STATE_TX_FRAMES) {
2524 err = mt76_testmode_tx_init(phy);
developer62713c82023-03-20 10:46:08 +08002525@@ -403,6 +495,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer4c6b6002022-05-30 16:36:44 +08002526 return 0;
2527 }
2528
2529+static int
2530+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2531+{
2532+ struct mt76_dev *dev = phy->dev;
2533+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2534+ u32 offset = 0;
2535+ int err = -EINVAL;
2536+
2537+ if (!dev->test_ops->set_eeprom)
2538+ return -EOPNOTSUPP;
2539+
2540+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2541+ 0, MT76_TM_EEPROM_ACTION_MAX))
2542+ goto out;
2543+
2544+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2545+ struct nlattr *cur;
2546+ int rem, idx = 0;
2547+
2548+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2549+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2550+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2551+ goto out;
2552+
2553+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2554+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2555+ goto out;
2556+
2557+ val[idx++] = nla_get_u8(cur);
2558+ }
2559+ }
2560+
2561+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2562+
2563+out:
2564+ return err;
2565+}
2566+
2567 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2568 void *data, int len)
2569 {
developer62713c82023-03-20 10:46:08 +08002570@@ -426,6 +556,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002571
2572 mutex_lock(&dev->mutex);
2573
2574+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2575+ err = mt76_testmode_set_eeprom(phy, tb);
2576+ goto out;
2577+ }
2578+
2579 if (tb[MT76_TM_ATTR_RESET]) {
2580 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2581 memset(td, 0, sizeof(*td));
developer62713c82023-03-20 10:46:08 +08002582@@ -452,7 +587,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002583 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2584 &td->tx_duty_cycle, 0, 99) ||
2585 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2586- &td->tx_power_control, 0, 1))
2587+ &td->tx_power_control, 0, 1) ||
2588+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2589+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2590+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2591 goto out;
2592
2593 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developer62713c82023-03-20 10:46:08 +08002594@@ -484,8 +622,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002595
2596 if (tb[MT76_TM_ATTR_TX_POWER]) {
2597 struct nlattr *cur;
2598- int idx = 0;
2599- int rem;
2600+ int rem, idx = 0;
2601
2602 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2603 if (nla_len(cur) != 1 ||
developer62713c82023-03-20 10:46:08 +08002604@@ -505,11 +642,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002605 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2606 goto out;
2607
2608- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2609+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2610+ }
2611+ }
2612+
2613+ if (tb[MT76_TM_ATTR_CFG]) {
2614+ struct nlattr *cur;
2615+ int rem, idx = 0;
2616+
2617+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2618+ if (nla_len(cur) != 1 || idx >= 2)
2619+ goto out;
2620+
2621+ if (idx == 0)
2622+ td->cfg.type = nla_get_u8(cur);
2623+ else
2624+ td->cfg.enable = nla_get_u8(cur);
2625 idx++;
2626 }
2627 }
2628
2629+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2630+ struct nlattr *cur;
2631+ int rem, idx = 0;
2632+
2633+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2634+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2635+ 0, MT76_TM_TXBF_ACT_MAX))
2636+ goto out;
2637+
2638+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2639+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2640+ if (nla_len(cur) != 2 ||
2641+ idx >= ARRAY_SIZE(td->txbf_param))
2642+ goto out;
2643+
2644+ td->txbf_param[idx++] = nla_get_u16(cur);
2645+ }
2646+ }
2647+
2648 if (dev->test_ops->set_params) {
2649 err = dev->test_ops->set_params(phy, tb, state);
2650 if (err)
developer62713c82023-03-20 10:46:08 +08002651@@ -574,6 +745,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002652 struct mt76_phy *phy = hw->priv;
2653 struct mt76_dev *dev = phy->dev;
2654 struct mt76_testmode_data *td = &phy->test;
2655+ struct mt76_testmode_entry_data *ed = &td->ed;
2656 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2657 int err = 0;
2658 void *a;
developer62713c82023-03-20 10:46:08 +08002659@@ -606,6 +778,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002660 goto out;
2661 }
2662
2663+ if (tb[MT76_TM_ATTR_AID]) {
2664+ struct mt76_wcid *wcid;
2665+ u8 aid;
2666+
2667+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2668+ if (err)
2669+ goto out;
2670+
2671+ mt76_tm_for_each_entry(phy, wcid, ed)
2672+ if (ed->aid == aid)
2673+ ed = mt76_testmode_entry_data(phy, wcid);
2674+ }
2675+
2676 mt76_testmode_init_defaults(phy);
2677
2678 err = -EMSGSIZE;
developer62713c82023-03-20 10:46:08 +08002679@@ -618,12 +803,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002680 goto out;
2681
2682 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2683- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2684 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2685- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2686- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2687 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2688- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2689 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2690 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2691 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developer62713c82023-03-20 10:46:08 +08002692@@ -643,6 +824,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002693 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2694 goto out;
2695
2696+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2697+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2698+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2699+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2700+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2701+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2702+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2703+ goto out;
2704+
2705 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2706 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2707 if (!a)
2708diff --git a/testmode.h b/testmode.h
developer287ee9f2023-03-02 20:13:34 +08002709index 8961326..57949f2 100644
developer4c6b6002022-05-30 16:36:44 +08002710--- a/testmode.h
2711+++ b/testmode.h
2712@@ -6,6 +6,8 @@
2713 #define __MT76_TESTMODE_H
2714
2715 #define MT76_TM_TIMEOUT 10
2716+#define MT76_TM_MAX_ENTRY_NUM 16
2717+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2718
2719 /**
2720 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2721@@ -47,6 +49,15 @@
2722 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2723 *
2724 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2725+ *
2726+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2727+ * (u8, see &enum mt76_testmode_eeprom_action)
2728+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2729+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2730+ * (nested, u8 attrs)
2731+ *
2732+ * @MT76_TM_ATTR_CFG: configure testmode rf feature (nested, see &enum mt76_testmode_cfg)
2733+ *
2734 */
2735 enum mt76_testmode_attr {
2736 MT76_TM_ATTR_UNSPEC,
2737@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2738 MT76_TM_ATTR_DRV_DATA,
2739
2740 MT76_TM_ATTR_MAC_ADDRS,
2741+ MT76_TM_ATTR_AID,
2742+ MT76_TM_ATTR_RU_ALLOC,
2743+ MT76_TM_ATTR_RU_IDX,
2744+
2745+ MT76_TM_ATTR_EEPROM_ACTION,
2746+ MT76_TM_ATTR_EEPROM_OFFSET,
2747+ MT76_TM_ATTR_EEPROM_VAL,
2748+
2749+ MT76_TM_ATTR_CFG,
2750+ MT76_TM_ATTR_TXBF_ACT,
2751+ MT76_TM_ATTR_TXBF_PARAM,
2752
2753 /* keep last */
2754 NUM_MT76_TM_ATTRS,
2755@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2756
2757 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2758
2759+/**
2760+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2761+ *
2762+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values to specific
2763+ * eeprom data block
2764+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2765+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2766+ */
2767+enum mt76_testmode_eeprom_action {
2768+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2769+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2770+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2771+
2772+ /* keep last */
2773+ NUM_MT76_TM_EEPROM_ACTION,
2774+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2775+};
2776+
2777+/**
2778+ * enum mt76_testmode_cfg - packet tx phy mode
2779+ *
2780+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values to specific
2781+ * eeprom data block
2782+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2783+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2784+ */
2785+enum mt76_testmode_cfg {
2786+ MT76_TM_CFG_TSSI,
2787+ MT76_TM_CFG_DPD,
2788+ MT76_TM_CFG_RATE_POWER_OFFSET,
2789+ MT76_TM_CFG_THERMAL_COMP,
2790+
2791+ /* keep last */
2792+ NUM_MT76_TM_CFG,
2793+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2794+};
2795+
2796+enum mt76_testmode_txbf_act {
2797+ MT76_TM_TXBF_ACT_INIT,
2798+ MT76_TM_TXBF_ACT_UPDATE_CH,
2799+ MT76_TM_TXBF_ACT_PHASE_COMP,
2800+ MT76_TM_TXBF_ACT_TX_PREP,
2801+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2802+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2803+ MT76_TM_TXBF_ACT_PHASE_CAL,
2804+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2805+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2806+
2807+ /* keep last */
2808+ NUM_MT76_TM_TXBF_ACT,
2809+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2810+};
2811+
2812 #endif
2813diff --git a/tools/fields.c b/tools/fields.c
developer62713c82023-03-20 10:46:08 +08002814index e3f6908..7e564a3 100644
developer4c6b6002022-05-30 16:36:44 +08002815--- a/tools/fields.c
2816+++ b/tools/fields.c
2817@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2818 [MT76_TM_STATE_IDLE] = "idle",
2819 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2820 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2821+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2822 };
2823
2824 static const char * const testmode_tx_mode[] = {
developer62713c82023-03-20 10:46:08 +08002825@@ -86,12 +87,12 @@ static void print_s32(const struct tm_field *field, struct nlattr *attr)
2826
2827 static void print_u32(const struct tm_field *field, struct nlattr *attr)
2828 {
2829- printf("%d", nla_get_u32(attr));
2830+ printf("%u", nla_get_u32(attr));
2831 }
2832
2833 static void print_u64(const struct tm_field *field, struct nlattr *attr)
2834 {
2835- printf("%lld", (unsigned long long)nla_get_u64(attr));
2836+ printf("%llu", (unsigned long long)nla_get_u64(attr));
2837 }
2838
2839 static bool parse_flag(const struct tm_field *field, int idx,
developer4c6b6002022-05-30 16:36:44 +08002840@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2841 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2842 }
2843
2844+static bool parse_mac(const struct tm_field *field, int idx,
2845+ struct nl_msg *msg, const char *val)
2846+{
2847+#define ETH_ALEN 6
2848+ bool ret = true;
2849+ char *str, *cur, *ap;
2850+ void *a;
2851+
2852+ ap = str = strdup(val);
2853+
2854+ a = nla_nest_start(msg, idx);
2855+
2856+ idx = 0;
2857+ while ((cur = strsep(&ap, ",")) != NULL) {
2858+ unsigned char addr[ETH_ALEN];
2859+ char *val, *tmp = cur;
2860+ int i = 0;
2861+
2862+ while ((val = strsep(&tmp, ":")) != NULL) {
2863+ if (i >= ETH_ALEN)
2864+ break;
2865+
2866+ addr[i++] = strtoul(val, NULL, 16);
2867+ }
2868+
2869+ nla_put(msg, idx, ETH_ALEN, addr);
2870+
2871+ idx++;
2872+ }
2873+
2874+ nla_nest_end(msg, a);
2875+
2876+ free(str);
2877+
2878+ return ret;
2879+}
2880+
2881+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2882+{
2883+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2884+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2885+ unsigned char addr[3][6];
2886+ struct nlattr *cur;
2887+ int idx = 0;
2888+ int rem;
2889+
2890+ nla_for_each_nested(cur, attr, rem) {
2891+ if (nla_len(cur) != 6)
2892+ continue;
2893+ memcpy(addr[idx++], nla_data(cur), 6);
2894+ }
2895+
2896+ printf("" MACSTR "," MACSTR "," MACSTR "",
2897+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2898+
2899+ return;
2900+}
2901
2902 #define FIELD_GENERIC(_field, _name, ...) \
2903 [FIELD_NAME(_field)] = { \
2904@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2905 ##__VA_ARGS__ \
2906 )
2907
2908+#define FIELD_MAC(_field, _name) \
2909+ [FIELD_NAME(_field)] = { \
2910+ .name = _name, \
2911+ .parse = parse_mac, \
2912+ .print = print_mac \
2913+ }
2914+
2915 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2916 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2917 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2918@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2919 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2920 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2921 FIELD(u8, TX_LTF, "tx_ltf"),
2922+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2923+ FIELD(u32, TX_IPG, "tx_ipg"),
2924+ FIELD(u32, TX_TIME, "tx_time"),
2925 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2926 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2927 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2928+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2929 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2930+ FIELD(u8, AID, "aid"),
2931+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2932+ FIELD(u8, RU_IDX, "ru_idx"),
2933+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2934 FIELD_NESTED_RO(STATS, stats, "",
2935 .print_extra = print_extra_stats),
2936 };
2937@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2938 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2939 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2940 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2941+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2942+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2943+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2944 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2945 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2946+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2947 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2948+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2949+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2950+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2951 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2952 };
2953
2954diff --git a/tx.c b/tx.c
developer287ee9f2023-03-02 20:13:34 +08002955index 1f309d0..6d55566 100644
developer4c6b6002022-05-30 16:36:44 +08002956--- a/tx.c
2957+++ b/tx.c
developereb6a0182022-12-12 18:53:32 +08002958@@ -250,8 +250,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
developer4c6b6002022-05-30 16:36:44 +08002959 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2960 struct mt76_phy *phy = hw->priv;
2961
2962- if (skb == phy->test.tx_skb)
2963- phy->test.tx_done++;
2964+ phy->test.tx_done++;
2965 if (phy->test.tx_queued == phy->test.tx_done)
2966 wake_up(&dev->tx_wait);
2967
2968--
developer4feb1012023-01-30 17:29:07 +080029692.18.0
developer4c6b6002022-05-30 16:36:44 +08002970