1From 90683f1b8f7961808da41b8782c98721265a2127 Mon Sep 17 00:00:00 2001
2From: Shayne Chen <shayne.chen@mediatek.com>
3Date: Thu, 21 Apr 2022 15:43:19 +0800
4Subject: [PATCH 1/5] mt76: testmode: additional supports
5
6Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
7---
8 drivers/net/wireless/mediatek/mt76/dma.c | 3 +-
9 drivers/net/wireless/mediatek/mt76/mac80211.c | 12 +
10 drivers/net/wireless/mediatek/mt76/mt76.h | 111 +-
11 .../wireless/mediatek/mt76/mt76_connac_mcu.c | 4 +
12 .../wireless/mediatek/mt76/mt76_connac_mcu.h | 2 +
13 .../net/wireless/mediatek/mt76/mt7915/init.c | 2 +-
14 .../net/wireless/mediatek/mt76/mt7915/mac.c | 37 +-
15 .../net/wireless/mediatek/mt76/mt7915/main.c | 2 +-
16 .../net/wireless/mediatek/mt76/mt7915/mcu.c | 11 +-
17 .../net/wireless/mediatek/mt76/mt7915/mcu.h | 31 +-
18 .../net/wireless/mediatek/mt76/mt7915/mmio.c | 2 +
19 .../wireless/mediatek/mt76/mt7915/mt7915.h | 14 +-
20 .../net/wireless/mediatek/mt76/mt7915/regs.h | 3 +
21 .../wireless/mediatek/mt76/mt7915/testmode.c | 1136 +++++++++++++++--
22 .../wireless/mediatek/mt76/mt7915/testmode.h | 278 ++++
23 drivers/net/wireless/mediatek/mt76/testmode.c | 274 +++-
24 drivers/net/wireless/mediatek/mt76/testmode.h | 75 ++
25 .../net/wireless/mediatek/mt76/tools/fields.c | 80 ++
26 drivers/net/wireless/mediatek/mt76/tx.c | 3 +-
27 19 files changed, 1928 insertions(+), 152 deletions(-)
28
29diff --git a/dma.c b/dma.c
30index 30de8be4..f6f5f129 100644
31--- a/dma.c
32+++ b/dma.c
33@@ -426,8 +426,7 @@ free:
34 if (mt76_is_testmode_skb(dev, skb, &hw)) {
35 struct mt76_phy *phy = hw->priv;
36
37- if (tx_info.skb == phy->test.tx_skb)
38- phy->test.tx_done--;
39+ phy->test.tx_done--;
40 }
41 #endif
42
43diff --git a/mac80211.c b/mac80211.c
44index 31602d7f..49b99f36 100644
45--- a/mac80211.c
46+++ b/mac80211.c
47@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 CHAN5G(60, 5300),
49 CHAN5G(64, 5320),
50
51+ CHAN5G(68, 5340),
52+ CHAN5G(80, 5400),
53+ CHAN5G(84, 5420),
54+ CHAN5G(88, 5440),
55+ CHAN5G(92, 5460),
56+ CHAN5G(96, 5480),
57+
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
61@@ -75,6 +82,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
62 CHAN5G(165, 5825),
63 CHAN5G(169, 5845),
64 CHAN5G(173, 5865),
65+
66+ CHAN5G(184, 4920),
67+ CHAN5G(188, 4940),
68+ CHAN5G(192, 4960),
69+ CHAN5G(196, 4980),
70 };
71
72 static const struct ieee80211_channel mt76_channels_6ghz[] = {
73diff --git a/mt76.h b/mt76.h
74index 8f6279c5..3d1e893d 100644
75--- a/mt76.h
76+++ b/mt76.h
77@@ -602,6 +602,21 @@ struct mt76_testmode_ops {
78 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
79 enum mt76_testmode_state new_state);
80 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
81+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
82+};
83+
84+struct mt76_testmode_entry_data {
85+ struct sk_buff *tx_skb;
86+
87+ u16 tx_mpdu_len;
88+ u8 tx_rate_idx;
89+ u8 tx_rate_nss;
90+ u8 tx_rate_ldpc;
91+
92+ u8 addr[3][ETH_ALEN];
93+ u8 aid;
94+ u8 ru_alloc;
95+ u8 ru_idx;
96 };
97
98 #define MT_TM_FW_RX_COUNT BIT(0)
99@@ -610,16 +625,11 @@ struct mt76_testmode_data {
100 enum mt76_testmode_state state;
101
102 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
103- struct sk_buff *tx_skb;
104
105 u32 tx_count;
106- u16 tx_mpdu_len;
107
108 u8 tx_rate_mode;
109- u8 tx_rate_idx;
110- u8 tx_rate_nss;
111 u8 tx_rate_sgi;
112- u8 tx_rate_ldpc;
113 u8 tx_rate_stbc;
114 u8 tx_ltf;
115
116@@ -635,10 +645,37 @@ struct mt76_testmode_data {
117 u8 tx_power[4];
118 u8 tx_power_control;
119
120- u8 addr[3][ETH_ALEN];
121+ struct list_head tm_entry_list;
122+ struct mt76_wcid *cur_entry;
123+ u8 entry_num;
124+ union {
125+ struct mt76_testmode_entry_data ed;
126+ struct {
127+ /* must be the same as mt76_testmode_entry_data */
128+ struct sk_buff *tx_skb;
129+
130+ u16 tx_mpdu_len;
131+ u8 tx_rate_idx;
132+ u8 tx_rate_nss;
133+ u8 tx_rate_ldpc;
134+
135+ u8 addr[3][ETH_ALEN];
136+ u8 aid;
137+ u8 ru_alloc;
138+ u8 ru_idx;
139+ };
140+ };
141
142 u8 flag;
143
144+ struct {
145+ u8 type;
146+ u8 enable;
147+ } cfg;
148+
149+ u8 txbf_act;
150+ u16 txbf_param[8];
151+
152 u32 tx_pending;
153 u32 tx_queued;
154 u16 tx_queued_limit;
155@@ -1120,14 +1157,69 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
156 #endif
157 }
158
159+#ifdef CONFIG_NL80211_TESTMODE
160+static inline struct mt76_wcid *
161+mt76_testmode_first_entry(struct mt76_phy *phy)
162+{
163+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
164+ return &phy->dev->global_wcid;
165+
166+ return list_first_entry(&phy->test.tm_entry_list,
167+ typeof(struct mt76_wcid),
168+ list);
169+}
170+
171+static inline struct mt76_testmode_entry_data *
172+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
173+{
174+ if (!wcid)
175+ return NULL;
176+ if (wcid == &phy->dev->global_wcid)
177+ return &phy->test.ed;
178+
179+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
180+ phy->hw->sta_data_size);
181+}
182+
183+#define mt76_tm_for_each_entry(phy, wcid, ed) \
184+ for (wcid = mt76_testmode_first_entry(phy), \
185+ ed = mt76_testmode_entry_data(phy, wcid); \
186+ ((phy->test.aid && \
187+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
188+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
189+ wcid = list_next_entry(wcid, list), \
190+ ed = mt76_testmode_entry_data(phy, wcid))
191+#endif
192+
193+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
194+ struct sk_buff *skb)
195+{
196+#ifdef CONFIG_NL80211_TESTMODE
197+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
198+ struct mt76_wcid *wcid;
199+
200+ if (skb == ed->tx_skb)
201+ return true;
202+
203+ mt76_tm_for_each_entry(phy, wcid, ed)
204+ if (skb == ed->tx_skb)
205+ return true;
206+ return false;
207+#else
208+ return false;
209+#endif
210+}
211+
212 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
213 struct sk_buff *skb,
214 struct ieee80211_hw **hw)
215 {
216 #ifdef CONFIG_NL80211_TESTMODE
217- if (skb == dev->phy.test.tx_skb)
218+ if (mt76_testmode_enabled(&dev->phy) &&
219+ __mt76_is_testmode_skb(&dev->phy, skb))
220 *hw = dev->phy.hw;
221- else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
222+ else if (dev->phy2 && mt76_testmode_enabled(dev->phy2) &&
223+ __mt76_is_testmode_skb(dev->phy2, skb))
224 *hw = dev->phy2->hw;
225 else
226 return false;
227@@ -1227,7 +1319,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
228 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
229 struct netlink_callback *cb, void *data, int len);
230 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
231-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
232+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
233+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
234
235 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
236 {
237diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
238index 4e495d4f..ebb78d33 100644
239--- a/mt76_connac_mcu.c
240+++ b/mt76_connac_mcu.c
241@@ -389,6 +389,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
242 switch (vif->type) {
243 case NL80211_IFTYPE_MESH_POINT:
244 case NL80211_IFTYPE_AP:
245+ case NL80211_IFTYPE_MONITOR:
246 if (vif->p2p)
247 conn_type = CONNECTION_P2P_GC;
248 else
249@@ -570,6 +571,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
250 rx->rca2 = 1;
251 rx->rv = 1;
252
253+ if (vif->type == NL80211_IFTYPE_MONITOR)
254+ rx->rca1 = 0;
255+
256 if (!is_connac_v1(dev))
257 return;
258
259diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
260index 82498039..a3bbf5ca 100644
261--- a/mt76_connac_mcu.h
262+++ b/mt76_connac_mcu.h
263@@ -816,6 +816,7 @@ enum {
264 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
265 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
266 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
267+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
268 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
269 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
270 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
271@@ -993,6 +994,7 @@ enum {
272 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
273 /* for vendor csi and air monitor */
274 MCU_EXT_CMD_SMESH_CTRL = 0xae,
275+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
276 MCU_EXT_CMD_CERT_CFG = 0xb7,
277 MCU_EXT_CMD_CSI_CTRL = 0xc2,
278 };
279diff --git a/mt7915/init.c b/mt7915/init.c
280index e4f6617f..25a9b5de 100644
281--- a/mt7915/init.c
282+++ b/mt7915/init.c
283@@ -573,7 +573,7 @@ static void mt7915_init_work(struct work_struct *work)
284 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
285 init_work);
286
287- mt7915_mcu_set_eeprom(dev);
288+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
289 mt7915_mac_init(dev);
290 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
291 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
292diff --git a/mt7915/mac.c b/mt7915/mac.c
293index ffb0037b..12afb204 100644
294--- a/mt7915/mac.c
295+++ b/mt7915/mac.c
296@@ -914,17 +914,39 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
297 {
298 #ifdef CONFIG_NL80211_TESTMODE
299 struct mt76_testmode_data *td = &phy->mt76->test;
300+ struct mt76_testmode_entry_data *ed;
301+ struct mt76_wcid *wcid;
302 const struct ieee80211_rate *r;
303- u8 bw, mode, nss = td->tx_rate_nss;
304- u8 rate_idx = td->tx_rate_idx;
305+ u8 bw, mode, nss, rate_idx, ldpc;
306 u16 rateval = 0;
307 u32 val;
308 bool cck = false;
309 int band;
310
311- if (skb != phy->mt76->test.tx_skb)
312+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
313+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
314+ phy->test.spe_idx));
315+
316+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
317+ txwi[1] |= cpu_to_le32(BIT(18));
318+ txwi[2] = 0;
319+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
320+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
321+
322+ return;
323+ }
324+
325+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
326+ if (ed->tx_skb == skb)
327+ break;
328+
329+ if (!ed)
330 return;
331
332+ nss = ed->tx_rate_nss;
333+ rate_idx = ed->tx_rate_idx;
334+ ldpc = ed->tx_rate_ldpc;
335+
336 switch (td->tx_rate_mode) {
337 case MT76_TM_TX_MODE_HT:
338 nss = 1 + (rate_idx >> 3);
339@@ -1013,14 +1035,13 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
340 if (mode >= MT_PHY_TYPE_HE_SU)
341 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
342
343- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
344+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
345 val |= MT_TXD6_LDPC;
346
347- txwi[1] &= ~cpu_to_le32(MT_TXD1_VTA);
348- txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
349+ if (phy->test.bf_en)
350+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
351+
352 txwi[6] |= cpu_to_le32(val);
353- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
354- phy->test.spe_idx));
355 #endif
356 }
357
358diff --git a/mt7915/main.c b/mt7915/main.c
359index f2a6d9da..942b8a9a 100644
360--- a/mt7915/main.c
361+++ b/mt7915/main.c
362@@ -221,7 +221,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
363 mvif->phy = phy;
364 mvif->mt76.band_idx = phy->band_idx;
365
366- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
367+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
368 if (ext_phy)
369 mvif->mt76.wmm_idx += 2;
370
371diff --git a/mt7915/mcu.c b/mt7915/mcu.c
372index 8a3bd33f..8ed8700d 100755
373--- a/mt7915/mcu.c
374+++ b/mt7915/mcu.c
375@@ -360,7 +360,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
376 if (mcu_txd->ext_cid) {
377 mcu_txd->ext_cid_ack = 1;
378
379- /* do not use Q_SET for efuse */
380 if (cmd & __MCU_CMD_FIELD_QUERY)
381 mcu_txd->set_query = MCU_Q_QUERY;
382 else
383@@ -536,6 +535,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
384 case MCU_EXT_EVENT_BCC_NOTIFY:
385 mt7915_mcu_rx_bcc_notify(dev, skb);
386 break;
387+#ifdef CONFIG_NL80211_TESTMODE
388+ case MCU_EXT_EVENT_BF_STATUS_READ:
389+ mt7915_tm_txbf_status_read(dev, skb);
390+ break;
391+#endif
392 default:
393 break;
394 }
395@@ -565,6 +569,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
396 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
397 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
398 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
399+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
400 !rxd->seq)
401 mt7915_mcu_rx_unsolicited_event(dev, skb);
402 else
403@@ -3030,14 +3035,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
404 return 0;
405 }
406
407-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
408+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
409 {
410 struct mt7915_mcu_eeprom req = {
411 .buffer_mode = EE_MODE_EFUSE,
412 .format = EE_FORMAT_WHOLE,
413 };
414
415- if (dev->flash_mode)
416+ if (flash_mode)
417 return mt7915_mcu_set_eeprom_flash(dev);
418
419 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
420diff --git a/mt7915/mcu.h b/mt7915/mcu.h
421index adf71f10..82c21168 100644
422--- a/mt7915/mcu.h
423+++ b/mt7915/mcu.h
424@@ -27,10 +27,15 @@ struct mt7915_mcu_txd {
425
426 enum {
427 MCU_ATE_SET_TRX = 0x1,
428+ MCU_ATE_SET_TSSI = 0x5,
429+ MCU_ATE_SET_DPD = 0x6,
430+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
431+ MCU_ATE_SET_THERMAL_COMP = 0x8,
432 MCU_ATE_SET_FREQ_OFFSET = 0xa,
433 MCU_ATE_SET_PHY_COUNT = 0x11,
434 MCU_ATE_SET_SLOT_TIME = 0x13,
435 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
436+ MCU_ATE_SET_MU_RX_AID = 0x1e,
437 };
438
439 struct mt7915_mcu_rxd {
440@@ -455,6 +460,12 @@ enum {
441
442 enum {
443 MT_BF_SOUNDING_ON = 1,
444+ MT_BF_DATA_PACKET_APPLY = 2,
445+ MT_BF_PFMU_TAG_READ = 5,
446+ MT_BF_PFMU_TAG_WRITE = 6,
447+ MT_BF_PHASE_CAL = 14,
448+ MT_BF_IBF_PHASE_COMP = 15,
449+ MT_BF_PROFILE_WRITE_ALL = 17,
450 MT_BF_TYPE_UPDATE = 20,
451 MT_BF_MODULE_UPDATE = 25
452 };
453@@ -681,12 +692,20 @@ struct mt7915_muru {
454 #define MURU_OFDMA_SCH_TYPE_DL BIT(0)
455 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
456
457-/* Common Config */
458-#define MURU_COMM_PPDU_FMT BIT(0)
459-#define MURU_COMM_SCH_TYPE BIT(1)
460-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
461-
462-/* DL&UL User config*/
463+/* Common Config */
464+/* #define MURU_COMM_PPDU_FMT BIT(0) */
465+/* #define MURU_COMM_SCH_TYPE BIT(1) */
466+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
467+#define MURU_COMM_PPDU_FMT BIT(0)
468+#define MURU_COMM_SCH_TYPE BIT(1)
469+#define MURU_COMM_BAND BIT(2)
470+#define MURU_COMM_WMM BIT(3)
471+#define MURU_COMM_SPE_IDX BIT(4)
472+#define MURU_COMM_PROC_TYPE BIT(5)
473+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
474+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
475+
476+/* DL&UL User config */
477 #define MURU_USER_CNT BIT(4)
478
479 enum {
480diff --git a/mt7915/mmio.c b/mt7915/mmio.c
481index b3de3a7a..bbf8b16c 100644
482--- a/mt7915/mmio.c
483+++ b/mt7915/mmio.c
484@@ -73,6 +73,7 @@ static const u32 mt7915_offs[] = {
485 [ARB_DRNGR0] = 0x194,
486 [ARB_SCR] = 0x080,
487 [RMAC_MIB_AIRTIME14] = 0x3b8,
488+ [AGG_AALCR0] = 0x048,
489 [AGG_AWSCR0] = 0x05c,
490 [AGG_PCR0] = 0x06c,
491 [AGG_ACR0] = 0x084,
492@@ -147,6 +148,7 @@ static const u32 mt7916_offs[] = {
493 [ARB_DRNGR0] = 0x1e0,
494 [ARB_SCR] = 0x000,
495 [RMAC_MIB_AIRTIME14] = 0x0398,
496+ [AGG_AALCR0] = 0x028,
497 [AGG_AWSCR0] = 0x030,
498 [AGG_PCR0] = 0x040,
499 [AGG_ACR0] = 0x054,
500diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
501index cf0630c8..4b375629 100644
502--- a/mt7915/mt7915.h
503+++ b/mt7915/mt7915.h
504@@ -294,6 +294,9 @@ struct mt7915_phy {
505 u8 last_snr;
506
507 u8 spe_idx;
508+
509+ bool bf_en;
510+ bool bf_ever_en;
511 } test;
512 #endif
513
514@@ -382,6 +385,14 @@ struct mt7915_dev {
515 void __iomem *dcm;
516 void __iomem *sku;
517
518+#ifdef CONFIG_NL80211_TESTMODE
519+ struct {
520+ void *txbf_phase_cal;
521+ void *txbf_pfmu_data;
522+ void *txbf_pfmu_tag;
523+ } test;
524+#endif
525+
526 #ifdef MTK_DEBUG
527 u16 wlan_idx;
528 struct {
529@@ -572,7 +583,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
530 struct ieee80211_vif *vif,
531 struct ieee80211_sta *sta,
532 void *data, u32 field);
533-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
534+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
535 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
536 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
537 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
538@@ -605,6 +616,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
539 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
540 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
541 void mt7915_mcu_exit(struct mt7915_dev *dev);
542+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
543
544 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
545 {
546diff --git a/mt7915/regs.h b/mt7915/regs.h
547index 99834310..6ba5e9fe 100644
548--- a/mt7915/regs.h
549+++ b/mt7915/regs.h
550@@ -50,6 +50,7 @@ enum offs_rev {
551 ARB_DRNGR0,
552 ARB_SCR,
553 RMAC_MIB_AIRTIME14,
554+ AGG_AALCR0,
555 AGG_AWSCR0,
556 AGG_PCR0,
557 AGG_ACR0,
558@@ -458,6 +459,8 @@ enum offs_rev {
559 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
560 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
561
562+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
563+ (_n) * 4))
564 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
565 (_n) * 4))
566 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
567diff --git a/mt7915/testmode.c b/mt7915/testmode.c
568index e8bf616c..0f367e6e 100644
569--- a/mt7915/testmode.c
570+++ b/mt7915/testmode.c
571@@ -9,6 +9,9 @@
572 enum {
573 TM_CHANGED_TXPOWER,
574 TM_CHANGED_FREQ_OFFSET,
575+ TM_CHANGED_AID,
576+ TM_CHANGED_CFG,
577+ TM_CHANGED_TXBF_ACT,
578
579 /* must be last */
580 NUM_TM_CHANGED
581@@ -17,6 +20,9 @@ enum {
582 static const u8 tm_change_map[] = {
583 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
584 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
585+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
586+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
587+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
588 };
589
590 struct reg_band {
591@@ -33,6 +39,34 @@ struct reg_band {
592 #define TM_REG_MAX_ID 20
593 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
594
595+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
596+{
597+ static const u8 width_to_bw[] = {
598+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
599+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
600+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
601+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
602+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
603+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
604+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
605+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
606+ };
607+
608+ if (width >= ARRAY_SIZE(width_to_bw))
609+ return 0;
610+
611+ return width_to_bw[width];
612+}
613+
614+static void
615+mt7915_tm_update_channel(struct mt7915_phy *phy)
616+{
617+ mutex_unlock(&phy->dev->mt76.mutex);
618+ mt7915_set_channel(phy);
619+ mutex_lock(&phy->dev->mt76.mutex);
620+
621+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
622+}
623
624 static int
625 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
626@@ -119,18 +153,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
627 }
628
629 static int
630-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
631+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
632 {
633+ struct mt76_testmode_entry_data *ed;
634+ struct mt76_wcid *wcid;
635 struct mt7915_dev *dev = phy->dev;
636 struct mt7915_tm_cmd req = {
637 .testmode_en = 1,
638 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
639- .param.clean.wcid = wcid,
640 .param.clean.band = phy != &dev->phy,
641 };
642
643- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
644- sizeof(req), false);
645+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
646+ int ret;
647+
648+ req.param.clean.wcid = wcid->idx;
649+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
650+ &req, sizeof(req), false);
651+ if (ret)
652+ return ret;
653+ }
654+
655+ return 0;
656 }
657
658 static int
659@@ -182,11 +226,706 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
660 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
661 }
662
663+static int
664+mt7915_tm_set_cfg(struct mt7915_phy *phy)
665+{
666+ static const u8 cfg_cmd[] = {
667+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
668+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
669+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
670+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
671+ };
672+ struct mt76_testmode_data *td = &phy->mt76->test;
673+ struct mt7915_dev *dev = phy->dev;
674+ struct mt7915_tm_cmd req = {
675+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
676+ .param_idx = cfg_cmd[td->cfg.type],
677+ .param.cfg.enable = td->cfg.enable,
678+ .param.cfg.band = phy->band_idx,
679+ };
680+
681+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
682+ sizeof(req), false);
683+}
684+
685+static int
686+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
687+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
688+ u8 nc, bool ebf)
689+{
690+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
691+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
692+ struct mt7915_dev *dev = phy->dev;
693+ struct sk_buff *skb;
694+ struct sta_rec_bf *bf;
695+ struct tlv *tlv;
696+ u8 ndp_rate;
697+
698+ if (nr == 1)
699+ ndp_rate = 8;
700+ else if (nr == 2)
701+ ndp_rate = 16;
702+ else
703+ ndp_rate = 24;
704+
705+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
706+ &msta->wcid);
707+ if (IS_ERR(skb))
708+ return PTR_ERR(skb);
709+
710+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
711+ bf = (struct sta_rec_bf *)tlv;
712+
713+ bf->pfmu = cpu_to_le16(pfmu_idx);
714+ bf->sounding_phy = 1;
715+ bf->bf_cap = ebf;
716+ bf->ncol = nc;
717+ bf->nrow = nr;
718+ bf->ndp_rate = ndp_rate;
719+ bf->ibf_timeout = 0xff;
720+ bf->tx_mode = MT_PHY_TYPE_HT;
721+
722+ if (ebf) {
723+ bf->mem[0].row = 0;
724+ bf->mem[1].row = 1;
725+ bf->mem[2].row = 2;
726+ bf->mem[3].row = 3;
727+ } else {
728+ bf->mem[0].row = 4;
729+ bf->mem[1].row = 5;
730+ bf->mem[2].row = 6;
731+ bf->mem[3].row = 7;
732+ }
733+
734+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
735+ MCU_EXT_CMD(STA_REC_UPDATE), true);
736+}
737+
738+static int
739+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
740+{
741+ struct mt76_testmode_data *td = &phy->mt76->test;
742+ struct mt76_testmode_entry_data *ed;
743+ struct ieee80211_sband_iftype_data *sdata;
744+ struct ieee80211_supported_band *sband;
745+ struct ieee80211_sta *sta;
746+ struct mt7915_sta *msta;
747+ int tid, ret;
748+
749+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
750+ return -EINVAL;
751+
752+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
753+ sizeof(*ed), GFP_KERNEL);
754+ if (!sta)
755+ return -ENOMEM;
756+
757+ msta = (struct mt7915_sta *)sta->drv_priv;
758+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
759+ memcpy(ed, &td->ed, sizeof(*ed));
760+
761+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
762+ sband = &phy->mt76->sband_5g.sband;
763+ sdata = phy->iftype[NL80211_BAND_5GHZ];
764+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
765+ sband = &phy->mt76->sband_6g.sband;
766+ sdata = phy->iftype[NL80211_BAND_6GHZ];
767+ } else {
768+ sband = &phy->mt76->sband_2g.sband;
769+ sdata = phy->iftype[NL80211_BAND_2GHZ];
770+ }
771+
772+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
773+ if (phy->test.bf_en) {
774+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
775+
776+ memcpy(sta->addr, addr, ETH_ALEN);
777+ }
778+
779+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
780+ memcpy(&sta->ht_cap, &sband->ht_cap, sizeof(sta->ht_cap));
781+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
782+ memcpy(&sta->vht_cap, &sband->vht_cap, sizeof(sta->vht_cap));
783+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
784+ memcpy(&sta->he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
785+ sizeof(sta->he_cap));
786+ sta->aid = aid;
787+ sta->wme = 1;
788+
789+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
790+ if (ret) {
791+ kfree(sta);
792+ return ret;
793+ }
794+
795+ /* prevent starting a tx ba session */
796+ for (tid = 0; tid < 8; tid++)
797+ set_bit(tid, &msta->ampdu_state);
798+
799+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
800+ td->entry_num++;
801+
802+ return 0;
803+}
804+
805+static void
806+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
807+{
808+ struct mt76_testmode_data *td = &phy->mt76->test;
809+ struct mt76_wcid *wcid, *tmp;
810+
811+ if (list_empty(&td->tm_entry_list))
812+ return;
813+
814+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
815+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
816+ struct mt7915_dev *dev = phy->dev;
817+
818+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
819+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
820+
821+ list_del_init(&wcid->list);
822+ kfree(sta);
823+ phy->mt76->test.entry_num--;
824+ }
825+}
826+
827+static int
828+mt7915_tm_set_entry(struct mt7915_phy *phy)
829+{
830+ struct mt76_testmode_data *td = &phy->mt76->test;
831+ struct mt76_testmode_entry_data *ed;
832+ struct mt76_wcid *wcid;
833+
834+ if (!td->aid) {
835+ if (td->state > MT76_TM_STATE_IDLE)
836+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
837+ mt7915_tm_entry_remove(phy, td->aid);
838+ return 0;
839+ }
840+
841+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
842+ if (ed->aid == td->aid) {
843+ struct sk_buff *skb;
844+
845+ local_bh_disable();
846+ skb = ed->tx_skb;
847+ memcpy(ed, &td->ed, sizeof(*ed));
848+ ed->tx_skb = skb;
849+ local_bh_enable();
850+
851+ return 0;
852+ }
853+ }
854+
855+ return mt7915_tm_entry_add(phy, td->aid);
856+}
857+
858+static int
859+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
860+{
861+ struct mt76_testmode_data *td = &phy->mt76->test;
862+ struct mt7915_dev *dev = phy->dev;
863+ bool enable = val[0];
864+ void *phase_cal, *pfmu_data, *pfmu_tag;
865+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
866+
867+ if (!enable) {
868+ phy->test.bf_en = 0;
869+ return 0;
870+ }
871+
872+ if (!dev->test.txbf_phase_cal) {
873+ phase_cal = devm_kzalloc(dev->mt76.dev,
874+ sizeof(struct mt7915_tm_txbf_phase) *
875+ MAX_PHASE_GROUP_NUM,
876+ GFP_KERNEL);
877+ if (!phase_cal)
878+ return -ENOMEM;
879+
880+ dev->test.txbf_phase_cal = phase_cal;
881+ }
882+
883+ if (!dev->test.txbf_pfmu_data) {
884+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
885+ if (!pfmu_data)
886+ return -ENOMEM;
887+
888+ dev->test.txbf_pfmu_data = pfmu_data;
889+ }
890+
891+ if (!dev->test.txbf_pfmu_tag) {
892+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
893+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
894+ if (!pfmu_tag)
895+ return -ENOMEM;
896+
897+ dev->test.txbf_pfmu_tag = pfmu_tag;
898+ }
899+
900+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
901+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
902+
903+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
904+ td->tx_mpdu_len = 1024;
905+ td->tx_rate_sgi = 0;
906+ td->tx_ipg = 100;
907+ phy->test.bf_en = 1;
908+
909+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
910+}
911+
912+static int
913+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
914+{
915+ struct mt7915_dev *dev = phy->dev;
916+ struct {
917+ u8 category;
918+ u8 wlan_idx_lo;
919+ u8 bw;
920+ u8 jp_band;
921+ u8 dbdc_idx;
922+ bool read_from_e2p;
923+ bool disable;
924+ u8 wlan_idx_hi;
925+ u8 buf[40];
926+ } __packed req = {
927+ .category = MT_BF_IBF_PHASE_COMP,
928+ .bw = val[0],
929+ .jp_band = (val[2] == 1) ? 1 : 0,
930+ .dbdc_idx = phy->band_idx,
931+ .read_from_e2p = val[3],
932+ .disable = val[4],
933+ };
934+ struct mt7915_tm_txbf_phase *phase =
935+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
936+
937+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
938+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
939+
940+ pr_info("ibf cal process: phase comp info\n");
941+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
942+ &req, sizeof(req), 0);
943+
944+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
945+ sizeof(req), true);
946+}
947+
948+static int
949+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
950+{
951+ struct mt7915_dev *dev = phy->dev;
952+ struct {
953+ u8 format_id;
954+ u8 pfmu_idx;
955+ bool bfer;
956+ u8 dbdc_idx;
957+ } __packed req = {
958+ .format_id = MT_BF_PFMU_TAG_READ,
959+ .pfmu_idx = pfmu_idx,
960+ .bfer = 1,
961+ .dbdc_idx = phy != &dev->phy,
962+ };
963+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
964+
965+ tag->t1.pfmu_idx = 0;
966+
967+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
968+ sizeof(req), true);
969+}
970+
971+static int
972+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
973+ struct mt7915_tm_pfmu_tag *tag)
974+{
975+ struct mt7915_dev *dev = phy->dev;
976+ struct {
977+ u8 format_id;
978+ u8 pfmu_idx;
979+ bool bfer;
980+ u8 dbdc_idx;
981+ u8 buf[64];
982+ } __packed req = {
983+ .format_id = MT_BF_PFMU_TAG_WRITE,
984+ .pfmu_idx = pfmu_idx,
985+ .bfer = 1,
986+ .dbdc_idx = phy != &dev->phy,
987+ };
988+
989+ memcpy(req.buf, tag, sizeof(*tag));
990+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
991+
992+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
993+ sizeof(req), false);
994+}
995+
996+static int
997+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
998+ bool ibf, bool phase_cal)
999+{
1000+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1001+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1002+ struct mt7915_dev *dev = phy->dev;
1003+ struct {
1004+ u8 category;
1005+ u8 wlan_idx_lo;
1006+ bool ebf;
1007+ bool ibf;
1008+ bool mu_txbf;
1009+ bool phase_cal;
1010+ u8 wlan_idx_hi;
1011+ u8 _rsv;
1012+ } __packed req = {
1013+ .category = MT_BF_DATA_PACKET_APPLY,
1014+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1015+ .ebf = ebf,
1016+ .ibf = ibf,
1017+ .phase_cal = phase_cal,
1018+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1019+ };
1020+
1021+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1022+ sizeof(req), false);
1023+}
1024+
1025+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1026+ struct mt76_wcid *wcid)
1027+{
1028+ struct mt7915_dev *dev = phy->dev;
1029+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1030+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1031+ struct sta_phy rate = {};
1032+
1033+ if (!sta)
1034+ return 0;
1035+
1036+ rate.type = MT_PHY_TYPE_HT;
1037+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1038+ rate.nss = ed->tx_rate_nss;
1039+ rate.mcs = ed->tx_rate_idx;
1040+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1041+
1042+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1043+ &rate, RATE_PARAM_FIXED);
1044+}
1045+
1046+static int
1047+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1048+{
1049+ bool bf_on = val[0], update = val[3];
1050+ /* u16 wlan_idx = val[2]; */
1051+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1052+ struct mt76_testmode_data *td = &phy->mt76->test;
1053+ struct mt76_wcid *wcid;
1054+
1055+ if (bf_on) {
1056+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1057+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1058+ tag->t1.invalid_prof = false;
1059+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1060+
1061+ phy->test.bf_ever_en = true;
1062+
1063+ if (update)
1064+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1065+ } else {
1066+ if (!phy->test.bf_ever_en) {
1067+ if (update)
1068+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1069+ } else {
1070+ phy->test.bf_ever_en = false;
1071+
1072+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1073+ tag->t1.invalid_prof = true;
1074+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1075+ }
1076+ }
1077+
1078+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1079+ mt7915_tm_txbf_set_rate(phy, wcid);
1080+
1081+ return 0;
1082+}
1083+
1084+static int
1085+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1086+{
1087+ static const u8 mode_to_lm[] = {
1088+ [MT76_TM_TX_MODE_CCK] = 0,
1089+ [MT76_TM_TX_MODE_OFDM] = 0,
1090+ [MT76_TM_TX_MODE_HT] = 1,
1091+ [MT76_TM_TX_MODE_VHT] = 2,
1092+ [MT76_TM_TX_MODE_HE_SU] = 3,
1093+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1094+ [MT76_TM_TX_MODE_HE_TB] = 3,
1095+ [MT76_TM_TX_MODE_HE_MU] = 3,
1096+ };
1097+ struct mt76_testmode_data *td = &phy->mt76->test;
1098+ struct mt76_wcid *wcid;
1099+ struct ieee80211_vif *vif = phy->monitor_vif;
1100+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1101+ u8 pfmu_idx = val[0], nc = val[2], nr;
1102+ int ret;
1103+
1104+ if (td->tx_antenna_mask == 3)
1105+ nr = 1;
1106+ else if (td->tx_antenna_mask == 7)
1107+ nr = 2;
1108+ else
1109+ nr = 3;
1110+
1111+ memset(tag, 0, sizeof(*tag));
1112+ tag->t1.pfmu_idx = pfmu_idx;
1113+ tag->t1.ebf = ebf;
1114+ tag->t1.nr = nr;
1115+ tag->t1.nc = nc;
1116+ tag->t1.invalid_prof = true;
1117+
1118+ tag->t1.snr_sts4 = 0xc0;
1119+ tag->t1.snr_sts5 = 0xff;
1120+ tag->t1.snr_sts6 = 0xff;
1121+ tag->t1.snr_sts7 = 0xff;
1122+
1123+ if (ebf) {
1124+ tag->t1.row_id1 = 0;
1125+ tag->t1.row_id2 = 1;
1126+ tag->t1.row_id3 = 2;
1127+ tag->t1.row_id4 = 3;
1128+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1129+ } else {
1130+ tag->t1.row_id1 = 4;
1131+ tag->t1.row_id2 = 5;
1132+ tag->t1.row_id3 = 6;
1133+ tag->t1.row_id4 = 7;
1134+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1135+
1136+ tag->t2.ibf_timeout = 0xff;
1137+ tag->t2.ibf_nr = nr;
1138+ }
1139+
1140+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1141+ if (ret)
1142+ return ret;
1143+
1144+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1145+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1146+ if (ret)
1147+ return ret;
1148+
1149+ if (!ebf)
1150+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1151+
1152+ return 0;
1153+}
1154+
1155+static int
1156+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1157+{
1158+#define GROUP_L 0
1159+#define GROUP_M 1
1160+#define GROUP_H 2
1161+ struct mt7915_dev *dev = phy->dev;
1162+ struct {
1163+ u8 category;
1164+ u8 group_l_m_n;
1165+ u8 group;
1166+ bool sx2;
1167+ u8 cal_type;
1168+ u8 lna_gain_level;
1169+ u8 _rsv[2];
1170+ } __packed req = {
1171+ .category = MT_BF_PHASE_CAL,
1172+ .group = val[0],
1173+ .group_l_m_n = val[1],
1174+ .sx2 = val[2],
1175+ .cal_type = val[3],
1176+ .lna_gain_level = 0, /* for test purpose */
1177+ };
1178+ struct mt7915_tm_txbf_phase *phase =
1179+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1180+
1181+ phase[req.group].status = 0;
1182+
1183+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1184+ sizeof(req), true);
1185+}
1186+
1187+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1188+{
1189+#define BF_PFMU_TAG 16
1190+#define BF_CAL_PHASE 21
1191+ u8 format_id;
1192+
1193+ skb_pull(skb, sizeof(struct mt7915_mcu_rxd));
1194+ format_id = *(u8 *)skb->data;
1195+
1196+ if (format_id == BF_PFMU_TAG) {
1197+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1198+
1199+ skb_pull(skb, 8);
1200+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1201+ } else if (format_id == BF_CAL_PHASE) {
1202+ struct mt7915_tm_ibf_cal_info *cal;
1203+ struct mt7915_tm_txbf_phase *phase =
1204+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1205+
1206+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1207+ switch (cal->cal_type) {
1208+ case IBF_PHASE_CAL_NORMAL:
1209+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1210+ if (cal->group_l_m_n != GROUP_M)
1211+ break;
1212+ phase = &phase[cal->group];
1213+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1214+ phase->status = cal->status;
1215+ break;
1216+ case IBF_PHASE_CAL_VERIFY:
1217+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1218+ break;
1219+ default:
1220+ break;
1221+ }
1222+ }
1223+
1224+ wake_up(&dev->mt76.tx_wait);
1225+
1226+ return 0;
1227+}
1228+
1229+static int
1230+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1231+{
1232+ struct mt76_testmode_data *td = &phy->mt76->test;
1233+ u16 pfmu_idx = val[0];
1234+ u16 subc_id = val[1];
1235+ u16 angle11 = val[2];
1236+ u16 angle21 = val[3];
1237+ u16 angle31 = val[4];
1238+ u16 angle41 = val[5];
1239+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1240+ struct mt7915_tm_pfmu_data *pfmu_data;
1241+
1242+ if (subc_id > 63)
1243+ return -EINVAL;
1244+
1245+ if (td->tx_antenna_mask == 2) {
1246+ phi11 = (s16)(angle21 - angle11);
1247+ } else if (td->tx_antenna_mask == 3) {
1248+ phi11 = (s16)(angle31 - angle11);
1249+ phi21 = (s16)(angle31 - angle21);
1250+ } else {
1251+ phi11 = (s16)(angle41 - angle11);
1252+ phi21 = (s16)(angle41 - angle21);
1253+ phi31 = (s16)(angle41 - angle31);
1254+ }
1255+
1256+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1257+ pfmu_data = &pfmu_data[subc_id];
1258+
1259+ if (subc_id < 32)
1260+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1261+ else
1262+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1263+ pfmu_data->phi11 = cpu_to_le16(phi11);
1264+ pfmu_data->phi21 = cpu_to_le16(phi21);
1265+ pfmu_data->phi31 = cpu_to_le16(phi31);
1266+
1267+ if (subc_id == 63) {
1268+ struct mt7915_dev *dev = phy->dev;
1269+ struct {
1270+ u8 format_id;
1271+ u8 pfmu_idx;
1272+ u8 dbdc_idx;
1273+ u8 _rsv;
1274+ u8 buf[512];
1275+ } __packed req = {
1276+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1277+ .pfmu_idx = pfmu_idx,
1278+ .dbdc_idx = phy != &dev->phy,
1279+ };
1280+
1281+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1282+
1283+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1284+ &req, sizeof(req), true);
1285+ }
1286+
1287+ return 0;
1288+}
1289+
1290+static int
1291+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1292+{
1293+ struct mt7915_tm_txbf_phase *phase, *p;
1294+ struct mt7915_dev *dev = phy->dev;
1295+ u8 *eeprom = dev->mt76.eeprom.data;
1296+ u16 offset;
1297+ bool is_7976;
1298+ int i;
1299+
1300+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1301+ offset = is_7976 ? 0x60a : 0x651;
1302+
1303+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1304+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1305+ p = &phase[i];
1306+
1307+ if (!p->status)
1308+ continue;
1309+
1310+ /* copy phase cal data to eeprom */
1311+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1312+ sizeof(p->phase));
1313+ }
1314+
1315+ return 0;
1316+}
1317+
1318+static int
1319+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1320+{
1321+ struct mt76_testmode_data *td = &phy->mt76->test;
1322+ u16 *val = td->txbf_param;
1323+
1324+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1325+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1326+
1327+ switch (td->txbf_act) {
1328+ case MT76_TM_TXBF_ACT_INIT:
1329+ return mt7915_tm_txbf_init(phy, val);
1330+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1331+ mt7915_tm_update_channel(phy);
1332+ break;
1333+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1334+ return mt7915_tm_txbf_phase_comp(phy, val);
1335+ case MT76_TM_TXBF_ACT_TX_PREP:
1336+ return mt7915_tm_txbf_set_tx(phy, val);
1337+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1338+ return mt7915_tm_txbf_profile_update(phy, val, false);
1339+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1340+ return mt7915_tm_txbf_profile_update(phy, val, true);
1341+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1342+ return mt7915_tm_txbf_phase_cal(phy, val);
1343+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1344+ return mt7915_tm_txbf_profile_update_all(phy, val);
1345+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1346+ return mt7915_tm_txbf_e2p_update(phy);
1347+ default:
1348+ break;
1349+ };
1350+
1351+ return 0;
1352+}
1353+
1354 static int
1355 mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
1356- u16 cw_max, u16 txop)
1357+ u16 cw_max, u16 txop, u8 tx_cmd)
1358 {
1359- struct mt7915_mcu_tx req = { .total = 1 };
1360+ struct mt7915_mcu_tx req = {
1361+ .valid = true,
1362+ .mode = tx_cmd,
1363+ .total = 1,
1364+ };
1365 struct edca *e = &req.edca[0];
1366
1367 e->queue = qid;
1368@@ -261,7 +1000,8 @@ done:
1369
1370 return mt7915_tm_set_wmm_qid(dev,
1371 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1372- aifsn, cw, cw, 0);
1373+ aifsn, cw, cw, 0,
1374+ mode == MT76_TM_TX_MODE_HE_MU);
1375 }
1376
1377 static int
1378@@ -337,7 +1077,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
1379 bitrate = cfg80211_calculate_bitrate(&rate);
1380 tx_len = bitrate * tx_time / 10 / 8;
1381
1382- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1383+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1384 if (ret)
1385 return ret;
1386
1387@@ -455,18 +1195,180 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
1388
1389 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1390
1391- if (!en)
1392+ if (!en) {
1393 mt7915_tm_set_tam_arb(phy, en, 0);
1394+
1395+ phy->mt76->test.aid = 0;
1396+ phy->mt76->test.tx_mpdu_len = 0;
1397+ phy->test.bf_en = 0;
1398+ mt7915_tm_set_entry(phy);
1399+ }
1400+}
1401+
1402+static bool
1403+mt7915_tm_check_skb(struct mt7915_phy *phy)
1404+{
1405+ struct mt76_testmode_entry_data *ed;
1406+ struct mt76_wcid *wcid;
1407+
1408+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1409+ struct ieee80211_tx_info *info;
1410+
1411+ if (!ed->tx_skb)
1412+ return false;
1413+
1414+ info = IEEE80211_SKB_CB(ed->tx_skb);
1415+ info->control.vif = phy->monitor_vif;
1416+ }
1417+
1418+ return true;
1419+}
1420+
1421+static int
1422+mt7915_tm_set_ba(struct mt7915_phy *phy)
1423+{
1424+ struct mt7915_dev *dev = phy->dev;
1425+ struct mt76_testmode_data *td = &phy->mt76->test;
1426+ struct mt76_wcid *wcid;
1427+ struct ieee80211_vif *vif = phy->monitor_vif;
1428+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1429+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1430+
1431+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1432+ int tid, ret;
1433+
1434+ params.sta = wcid_to_sta(wcid);
1435+ for (tid = 0; tid < 8; tid++) {
1436+ params.tid = tid;
1437+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1438+ if (ret)
1439+ return ret;
1440+ }
1441+ }
1442+
1443+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1444+ 0x01010101);
1445+
1446+ return 0;
1447+}
1448+
1449+static int
1450+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1451+{
1452+/* #define MURU_SET_MANUAL_CFG 100 */
1453+ struct mt7915_dev *dev = phy->dev;
1454+ struct {
1455+ __le32 cmd;
1456+ struct mt7915_tm_muru muru;
1457+ } __packed req = {
1458+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1459+ };
1460+
1461+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1462+
1463+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1464+ sizeof(req), false);
1465+}
1466+
1467+static int
1468+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1469+{
1470+ struct mt76_testmode_data *td = &phy->mt76->test;
1471+ struct mt76_testmode_entry_data *ed;
1472+ struct mt76_wcid *wcid;
1473+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1474+ struct ieee80211_vif *vif = phy->monitor_vif;
1475+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1476+ struct mt7915_tm_muru muru = {};
1477+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1478+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1479+ int i;
1480+
1481+ comm->ppdu_format = MURU_PPDU_HE_MU;
1482+ comm->band = mvif->mt76.band_idx;
1483+ comm->wmm_idx = mvif->mt76.wmm_idx;
1484+ comm->spe_idx = phy->test.spe_idx;
1485+
1486+ dl->bw = mt7915_tm_chan_bw(chandef->width);
1487+ dl->gi = td->tx_rate_sgi;;
1488+ dl->ltf = td->tx_ltf;
1489+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1490+
1491+ for (i = 0; i < sizeof(dl->ru); i++)
1492+ dl->ru[i] = 0x71;
1493+
1494+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1495+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1496+
1497+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1498+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1499+ dl_usr->ru_idx = ed->ru_idx;
1500+ dl_usr->mcs = ed->tx_rate_idx;
1501+ dl_usr->nss = ed->tx_rate_nss - 1;
1502+ dl_usr->ldpc = ed->tx_rate_ldpc;
1503+ dl->ru[dl->user_num] = ed->ru_alloc;
1504+
1505+ dl->user_num++;
1506+ }
1507+
1508+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1509+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1510+
1511+ return mt7915_tm_set_muru_cfg(phy, &muru);
1512+}
1513+
1514+static int
1515+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1516+{
1517+#define MURU_SET_TX_PKT_CNT 105
1518+#define MURU_SET_TX_EN 106
1519+ struct mt7915_dev *dev = phy->dev;
1520+ struct {
1521+ __le32 cmd;
1522+ u8 band;
1523+ u8 enable;
1524+ u8 _rsv[2];
1525+ __le32 tx_count;
1526+ } __packed req = {
1527+ .band = phy != &dev->phy,
1528+ .enable = enable,
1529+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1530+ };
1531+ int ret;
1532+
1533+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1534+ cpu_to_le32(MURU_SET_TX_EN);
1535+
1536+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1537+ sizeof(req), false);
1538+ if (ret)
1539+ return ret;
1540+
1541+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1542+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1543+
1544+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1545+ sizeof(req), false);
1546 }
1547
1548 static void
1549-mt7915_tm_update_channel(struct mt7915_phy *phy)
1550+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1551 {
1552- mutex_unlock(&phy->dev->mt76.mutex);
1553- mt7915_set_channel(phy);
1554- mutex_lock(&phy->dev->mt76.mutex);
1555+ struct mt76_testmode_data *td = &phy->mt76->test;
1556
1557- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1558+ if (enable) {
1559+ struct mt7915_dev *dev = phy->dev;
1560+
1561+ mt7915_tm_set_ba(phy);
1562+ mt7915_tm_set_muru_dl(phy);
1563+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1564+ } else {
1565+ /* set to zero for counting real tx free num */
1566+ td->tx_done = 0;
1567+ }
1568+
1569+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1570+ usleep_range(100000, 200000);
1571 }
1572
1573 static void
1574@@ -475,47 +1377,48 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1575 static const u8 spe_idx_map[] = {0, 0, 1, 0, 3, 2, 4, 0,
1576 9, 8, 6, 10, 16, 12, 18, 0};
1577 struct mt76_testmode_data *td = &phy->mt76->test;
1578- struct mt7915_dev *dev = phy->dev;
1579- struct ieee80211_tx_info *info;
1580- u8 duty_cycle = td->tx_duty_cycle;
1581- u32 tx_time = td->tx_time;
1582- u32 ipg = td->tx_ipg;
1583
1584 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1585- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1586+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1587
1588 if (en) {
1589- mt7915_tm_update_channel(phy);
1590+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1591+ u8 duty_cycle = td->tx_duty_cycle;
1592+
1593+ if (!phy->test.bf_en)
1594+ mt7915_tm_update_channel(phy);
1595
1596 if (td->tx_spe_idx) {
1597 phy->test.spe_idx = td->tx_spe_idx;
1598 } else {
1599 phy->test.spe_idx = spe_idx_map[td->tx_antenna_mask];
1600 }
1601- }
1602
1603- mt7915_tm_set_tam_arb(phy, en,
1604- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1605+ /* if all three params are set, duty_cycle will be ignored */
1606+ if (duty_cycle && tx_time && !ipg) {
1607+ ipg = tx_time * 100 / duty_cycle - tx_time;
1608+ } else if (duty_cycle && !tx_time && ipg) {
1609+ if (duty_cycle < 100)
1610+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1611+ }
1612
1613- /* if all three params are set, duty_cycle will be ignored */
1614- if (duty_cycle && tx_time && !ipg) {
1615- ipg = tx_time * 100 / duty_cycle - tx_time;
1616- } else if (duty_cycle && !tx_time && ipg) {
1617- if (duty_cycle < 100)
1618- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1619- }
1620+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1621+ mt7915_tm_set_tx_len(phy, tx_time);
1622
1623- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1624- mt7915_tm_set_tx_len(phy, tx_time);
1625+ if (ipg)
1626+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1627
1628- if (ipg)
1629- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1630+ if (!mt7915_tm_check_skb(phy))
1631+ return;
1632+ } else {
1633+ mt7915_tm_clean_hwq(phy);
1634+ }
1635
1636- if (!en || !td->tx_skb)
1637- return;
1638+ mt7915_tm_set_tam_arb(phy, en,
1639+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1640
1641- info = IEEE80211_SKB_CB(td->tx_skb);
1642- info->control.vif = phy->monitor_vif;
1643+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1644+ mt7915_tm_tx_frames_mu(phy, en);
1645
1646 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1647 }
1648@@ -544,10 +1447,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
1649 return ret;
1650
1651 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1652- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1653- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1654- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1655- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1656
1657 if (!clear) {
1658 enum mt76_rxq_id q = req.band ? MT_RXQ_EXT : MT_RXQ_MAIN;
1659@@ -562,13 +1461,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
1660 return 0;
1661 }
1662
1663+static int
1664+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1665+{
1666+ struct mt7915_dev *dev = phy->dev;
1667+ struct mt76_wcid *wcid = NULL;
1668+ struct mt76_testmode_entry_data *ed;
1669+ struct {
1670+ u8 band;
1671+ u8 _rsv;
1672+ __le16 wlan_idx;
1673+ } __packed req = {
1674+ .band = phy->band_idx,
1675+ };
1676+
1677+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1678+ if (ed->aid == aid)
1679+ break;
1680+
1681+ if (!wcid)
1682+ return -EINVAL;
1683+
1684+ req.wlan_idx = cpu_to_le16(wcid->idx);
1685+
1686+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1687+ &req, sizeof(req), false);
1688+}
1689+
1690+static int
1691+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1692+{
1693+ struct mt7915_dev *dev = phy->dev;
1694+ struct mt7915_tm_cmd req = {
1695+ .testmode_en = 1,
1696+ .param_idx = MCU_ATE_SET_MU_RX_AID,
1697+ .param.rx_aid.band = cpu_to_le32(phy->band_idx),
1698+ .param.rx_aid.aid = cpu_to_le16(aid),
1699+ };
1700+
1701+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1702+ sizeof(req), false);
1703+}
1704+
1705 static void
1706 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1707 {
1708+ struct mt76_testmode_data *td = &phy->mt76->test;
1709+
1710+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1711 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1712
1713 if (en) {
1714- mt7915_tm_update_channel(phy);
1715+ if (!phy->test.bf_en)
1716+ mt7915_tm_update_channel(phy);
1717+ if (td->aid)
1718+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1719
1720 /* read-clear */
1721 mt7915_tm_get_rx_stats(phy, true);
1722@@ -576,9 +1523,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1723 /* clear fw count */
1724 mt7915_tm_set_phy_count(phy, 0);
1725 mt7915_tm_set_phy_count(phy, 1);
1726-
1727- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1728 }
1729+
1730+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1731+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1732+
1733+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1734 }
1735
1736 static int
1737@@ -615,35 +1565,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
1738 tx_cont->center_ch = freq1;
1739 tx_cont->tx_ant = td->tx_antenna_mask;
1740 tx_cont->band = phy != &dev->phy;
1741-
1742- switch (chandef->width) {
1743- case NL80211_CHAN_WIDTH_40:
1744- tx_cont->bw = CMD_CBW_40MHZ;
1745- break;
1746- case NL80211_CHAN_WIDTH_80:
1747- tx_cont->bw = CMD_CBW_80MHZ;
1748- break;
1749- case NL80211_CHAN_WIDTH_80P80:
1750- tx_cont->bw = CMD_CBW_8080MHZ;
1751- break;
1752- case NL80211_CHAN_WIDTH_160:
1753- tx_cont->bw = CMD_CBW_160MHZ;
1754- break;
1755- case NL80211_CHAN_WIDTH_5:
1756- tx_cont->bw = CMD_CBW_5MHZ;
1757- break;
1758- case NL80211_CHAN_WIDTH_10:
1759- tx_cont->bw = CMD_CBW_10MHZ;
1760- break;
1761- case NL80211_CHAN_WIDTH_20:
1762- tx_cont->bw = CMD_CBW_20MHZ;
1763- break;
1764- case NL80211_CHAN_WIDTH_20_NOHT:
1765- tx_cont->bw = CMD_CBW_20MHZ;
1766- break;
1767- default:
1768- return -EINVAL;
1769- }
1770+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1771
1772 if (!en) {
1773 req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
1774@@ -727,6 +1649,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
1775 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1776 if (changed & BIT(TM_CHANGED_TXPOWER))
1777 mt7915_tm_set_tx_power(phy);
1778+ if (changed & BIT(TM_CHANGED_AID))
1779+ mt7915_tm_set_entry(phy);
1780+ if (changed & BIT(TM_CHANGED_CFG))
1781+ mt7915_tm_set_cfg(phy);
1782+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1783+ mt7915_tm_set_txbf(phy);
1784 }
1785
1786 static int
1787@@ -800,6 +1728,7 @@ static int
1788 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1789 {
1790 struct mt7915_phy *phy = mphy->priv;
1791+ struct mt7915_dev *dev = phy->dev;
1792 void *rx, *rssi;
1793 int i;
1794
1795@@ -845,11 +1774,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1796
1797 nla_nest_end(msg, rx);
1798
1799+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1800+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1801+
1802 return mt7915_tm_get_rx_stats(phy, false);
1803 }
1804
1805+static int
1806+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1807+{
1808+ struct mt7915_mcu_eeprom_info req = {};
1809+ u8 *eeprom = dev->mt76.eeprom.data;
1810+ int i, ret = -EINVAL;
1811+
1812+ /* prevent damaging the chip id in efuse */
1813+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1814+ goto out;
1815+
1816+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1817+ req.addr = cpu_to_le32(i);
1818+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1819+
1820+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1821+ &req, sizeof(req), true);
1822+ if (ret)
1823+ return ret;
1824+ }
1825+
1826+out:
1827+ return ret;
1828+}
1829+
1830+static int
1831+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1832+{
1833+ struct mt7915_phy *phy = mphy->priv;
1834+ struct mt7915_dev *dev = phy->dev;
1835+ u8 *eeprom = dev->mt76.eeprom.data;
1836+ int ret = 0;
1837+
1838+ if (offset >= mt7915_eeprom_size(dev))
1839+ return -EINVAL;
1840+
1841+ switch (action) {
1842+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1843+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1844+ break;
1845+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1846+ ret = mt7915_mcu_set_eeprom(dev, true);
1847+ break;
1848+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1849+ ret = mt7915_tm_write_back_to_efuse(dev);
1850+ break;
1851+ default:
1852+ break;
1853+ }
1854+
1855+ return ret;
1856+}
1857+
1858 const struct mt76_testmode_ops mt7915_testmode_ops = {
1859 .set_state = mt7915_tm_set_state,
1860 .set_params = mt7915_tm_set_params,
1861 .dump_stats = mt7915_tm_dump_stats,
1862+ .set_eeprom = mt7915_tm_set_eeprom,
1863 };
1864diff --git a/mt7915/testmode.h b/mt7915/testmode.h
1865index a1c54c89..01b08e9e 100644
1866--- a/mt7915/testmode.h
1867+++ b/mt7915/testmode.h
1868@@ -4,6 +4,8 @@
1869 #ifndef __MT7915_TESTMODE_H
1870 #define __MT7915_TESTMODE_H
1871
1872+#include "mcu.h"
1873+
1874 struct mt7915_tm_trx {
1875 u8 type;
1876 u8 enable;
1877@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1878 u8 _rsv[2];
1879 };
1880
1881+struct mt7915_tm_mu_rx_aid {
1882+ __le32 band;
1883+ __le16 aid;
1884+};
1885+
1886 struct mt7915_tm_cmd {
1887 u8 testmode_en;
1888 u8 param_idx;
1889@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1890 struct mt7915_tm_slot_time slot;
1891 struct mt7915_tm_clean_txq clean;
1892 struct mt7915_tm_cfg cfg;
1893+ struct mt7915_tm_mu_rx_aid rx_aid;
1894 u8 test[72];
1895 } param;
1896 } __packed;
1897@@ -109,6 +117,16 @@ enum {
1898 TAM_ARB_OP_MODE_FORCE_SU = 5,
1899 };
1900
1901+enum {
1902+ TM_CBW_20MHZ,
1903+ TM_CBW_40MHZ,
1904+ TM_CBW_80MHZ,
1905+ TM_CBW_10MHZ,
1906+ TM_CBW_5MHZ,
1907+ TM_CBW_160MHZ,
1908+ TM_CBW_8080MHZ,
1909+};
1910+
1911 struct mt7915_tm_rx_stat_band {
1912 u8 category;
1913
1914@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1915 __le16 mdrdy_cnt_ofdm;
1916 };
1917
1918+struct mt7915_tm_muru_comm {
1919+ u8 ppdu_format;
1920+ u8 sch_type;
1921+ u8 band;
1922+ u8 wmm_idx;
1923+ u8 spe_idx;
1924+ u8 proc_type;
1925+};
1926+
1927+struct mt7915_tm_muru_dl_usr {
1928+ __le16 wlan_idx;
1929+ u8 ru_alloc_seg;
1930+ u8 ru_idx;
1931+ u8 ldpc;
1932+ u8 nss;
1933+ u8 mcs;
1934+ u8 mu_group_idx;
1935+ u8 vht_groud_id;
1936+ u8 vht_up;
1937+ u8 he_start_stream;
1938+ u8 he_mu_spatial;
1939+ u8 ack_policy;
1940+ __le16 tx_power_alpha;
1941+};
1942+
1943+struct mt7915_tm_muru_dl {
1944+ u8 user_num;
1945+ u8 tx_mode;
1946+ u8 bw;
1947+ u8 gi;
1948+ u8 ltf;
1949+ /* sigB */
1950+ u8 mcs;
1951+ u8 dcm;
1952+ u8 cmprs;
1953+
1954+ u8 tx_power;
1955+ u8 ru[8];
1956+ u8 c26[2];
1957+ u8 ack_policy;
1958+
1959+ struct mt7915_tm_muru_dl_usr usr[16];
1960+};
1961+
1962+struct mt7915_tm_muru_ul_usr {
1963+ __le16 wlan_idx;
1964+ u8 ru_alloc;
1965+ u8 ru_idx;
1966+ u8 ldpc;
1967+ u8 nss;
1968+ u8 mcs;
1969+ u8 target_rssi;
1970+ __le32 trig_pkt_size;
1971+};
1972+
1973+struct mt7915_tm_muru_ul {
1974+ u8 user_num;
1975+
1976+ /* UL TX */
1977+ u8 trig_type;
1978+ __le16 trig_cnt;
1979+ __le16 trig_intv;
1980+ u8 bw;
1981+ u8 gi_ltf;
1982+ __le16 ul_len;
1983+ u8 pad;
1984+ u8 trig_ta[ETH_ALEN];
1985+ u8 ru[8];
1986+ u8 c26[2];
1987+
1988+ struct mt7915_tm_muru_ul_usr usr[16];
1989+ /* HE TB RX Debug */
1990+ __le32 rx_hetb_nonsf_en_bitmap;
1991+ __le32 rx_hetb_cfg[2];
1992+
1993+ /* DL TX */
1994+ u8 ba_type;
1995+};
1996+
1997+struct mt7915_tm_muru {
1998+ __le32 cfg_comm;
1999+ __le32 cfg_dl;
2000+ __le32 cfg_ul;
2001+
2002+ struct mt7915_tm_muru_comm comm;
2003+ struct mt7915_tm_muru_dl dl;
2004+ struct mt7915_tm_muru_ul ul;
2005+};
2006+
2007+#define MURU_PPDU_HE_MU BIT(3)
2008+
2009+/* Common Config */
2010+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2011+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2012+/* #define MURU_COMM_BAND BIT(2) */
2013+/* #define MURU_COMM_WMM BIT(3) */
2014+/* #define MURU_COMM_SPE_IDX BIT(4) */
2015+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2016+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2017+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2018+/* DL Config */
2019+#define MURU_DL_BW BIT(0)
2020+#define MURU_DL_GI BIT(1)
2021+#define MURU_DL_TX_MODE BIT(2)
2022+#define MURU_DL_TONE_PLAN BIT(3)
2023+#define MURU_DL_USER_CNT BIT(4)
2024+#define MURU_DL_LTF BIT(5)
2025+#define MURU_DL_SIGB_MCS BIT(6)
2026+#define MURU_DL_SIGB_DCM BIT(7)
2027+#define MURU_DL_SIGB_CMPRS BIT(8)
2028+#define MURU_DL_ACK_POLICY BIT(9)
2029+#define MURU_DL_TXPOWER BIT(10)
2030+/* DL Per User Config */
2031+#define MURU_DL_USER_WLAN_ID BIT(16)
2032+#define MURU_DL_USER_COD BIT(17)
2033+#define MURU_DL_USER_MCS BIT(18)
2034+#define MURU_DL_USER_NSS BIT(19)
2035+#define MURU_DL_USER_RU_ALLOC BIT(20)
2036+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2037+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2038+#define MURU_DL_USER_ACK_POLICY BIT(23)
2039+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2040+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2041+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2042+
2043+#define MAX_PHASE_GROUP_NUM 9
2044+
2045+struct mt7915_tm_txbf_phase {
2046+ u8 status;
2047+ struct {
2048+ u8 r0_uh;
2049+ u8 r0_h;
2050+ u8 r0_m;
2051+ u8 r0_l;
2052+ u8 r0_ul;
2053+ u8 r1_uh;
2054+ u8 r1_h;
2055+ u8 r1_m;
2056+ u8 r1_l;
2057+ u8 r1_ul;
2058+ u8 r2_uh;
2059+ u8 r2_h;
2060+ u8 r2_m;
2061+ u8 r2_l;
2062+ u8 r2_ul;
2063+ u8 r3_uh;
2064+ u8 r3_h;
2065+ u8 r3_m;
2066+ u8 r3_l;
2067+ u8 r3_ul;
2068+ u8 r2_uh_sx2;
2069+ u8 r2_h_sx2;
2070+ u8 r2_m_sx2;
2071+ u8 r2_l_sx2;
2072+ u8 r2_ul_sx2;
2073+ u8 r3_uh_sx2;
2074+ u8 r3_h_sx2;
2075+ u8 r3_m_sx2;
2076+ u8 r3_l_sx2;
2077+ u8 r3_ul_sx2;
2078+ u8 m_t0_h;
2079+ u8 m_t1_h;
2080+ u8 m_t2_h;
2081+ u8 m_t2_h_sx2;
2082+ u8 r0_reserved;
2083+ u8 r1_reserved;
2084+ u8 r2_reserved;
2085+ u8 r3_reserved;
2086+ u8 r2_sx2_reserved;
2087+ u8 r3_sx2_reserved;
2088+ } phase;
2089+};
2090+
2091+struct mt7915_tm_pfmu_tag1 {
2092+ __le32 pfmu_idx:10;
2093+ __le32 ebf:1;
2094+ __le32 data_bw:2;
2095+ __le32 lm:2;
2096+ __le32 is_mu:1;
2097+ __le32 nr:3, nc:3;
2098+ __le32 codebook:2;
2099+ __le32 ngroup:2;
2100+ __le32 _rsv:2;
2101+ __le32 invalid_prof:1;
2102+ __le32 rmsd:3;
2103+
2104+ __le32 col_id1:6, row_id1:10;
2105+ __le32 col_id2:6, row_id2:10;
2106+ __le32 col_id3:6, row_id3:10;
2107+ __le32 col_id4:6, row_id4:10;
2108+
2109+ __le32 ru_start_id:7;
2110+ __le32 _rsv1:1;
2111+ __le32 ru_end_id:7;
2112+ __le32 _rsv2:1;
2113+ __le32 mob_cal_en:1;
2114+ __le32 _rsv3:15;
2115+
2116+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2117+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2118+
2119+ __le32 _rsv4;
2120+} __packed;
2121+
2122+struct mt7915_tm_pfmu_tag2 {
2123+ __le32 smart_ant:24;
2124+ __le32 se_idx:5;
2125+ __le32 _rsv:3;
2126+
2127+ __le32 _rsv1:8;
2128+ __le32 rmsd_thres:3;
2129+ __le32 _rsv2:5;
2130+ __le32 ibf_timeout:8;
2131+ __le32 _rsv3:8;
2132+
2133+ __le32 _rsv4:16;
2134+ __le32 ibf_data_bw:2;
2135+ __le32 ibf_nc:3;
2136+ __le32 ibf_nr:3;
2137+ __le32 ibf_ru:8;
2138+
2139+ __le32 mob_delta_t:8;
2140+ __le32 mob_lq_result:7;
2141+ __le32 _rsv5:1;
2142+ __le32 _rsv6:16;
2143+
2144+ __le32 _rsv7;
2145+} __packed;
2146+
2147+struct mt7915_tm_pfmu_tag {
2148+ struct mt7915_tm_pfmu_tag1 t1;
2149+ struct mt7915_tm_pfmu_tag2 t2;
2150+};
2151+
2152+struct mt7915_tm_pfmu_data {
2153+ __le16 subc_idx;
2154+ __le16 phi11;
2155+ __le16 phi21;
2156+ __le16 phi31;
2157+};
2158+
2159+struct mt7915_tm_ibf_cal_info {
2160+ u8 format_id;
2161+ u8 group_l_m_n;
2162+ u8 group;
2163+ bool sx2;
2164+ u8 status;
2165+ u8 cal_type;
2166+ u8 _rsv[2];
2167+ u8 buf[1000];
2168+} __packed;
2169+
2170+enum {
2171+ IBF_PHASE_CAL_UNSPEC,
2172+ IBF_PHASE_CAL_NORMAL,
2173+ IBF_PHASE_CAL_VERIFY,
2174+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2175+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2176+};
2177+
2178 #endif
2179diff --git a/testmode.c b/testmode.c
2180index e6d1f702..2c699ac8 100644
2181--- a/testmode.c
2182+++ b/testmode.c
2183@@ -25,28 +25,15 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
2184 };
2185 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2186
2187-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2188+static void
2189+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
2190+ struct sk_buff *skb, struct mt76_queue *q, u16 limit)
2191 {
2192 struct mt76_testmode_data *td = &phy->test;
2193 struct mt76_dev *dev = phy->dev;
2194- struct mt76_wcid *wcid = &dev->global_wcid;
2195- struct sk_buff *skb = td->tx_skb;
2196- struct mt76_queue *q;
2197- u16 tx_queued_limit;
2198- int qid;
2199-
2200- if (!skb || !td->tx_pending)
2201- return;
2202+ u16 count = limit;
2203
2204- qid = skb_get_queue_mapping(skb);
2205- q = phy->q_tx[qid];
2206-
2207- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2208-
2209- spin_lock_bh(&q->lock);
2210-
2211- while (td->tx_pending > 0 &&
2212- td->tx_queued - td->tx_done < tx_queued_limit &&
2213+ while (td->tx_pending > 0 && count &&
2214 q->queued < q->ndesc / 2) {
2215 int ret;
2216
2217@@ -55,13 +42,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
2218 if (ret < 0)
2219 break;
2220
2221+ count--;
2222 td->tx_pending--;
2223 td->tx_queued++;
2224+
2225+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2226+ if (td->tx_queued - td->tx_done >= limit)
2227+ break;
2228 }
2229
2230 dev->queue_ops->kick(dev, q);
2231+}
2232+
2233+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2234+{
2235+ struct mt76_testmode_data *td = &phy->test;
2236+ struct mt76_testmode_entry_data *ed;
2237+ struct mt76_queue *q;
2238+ int qid;
2239+ u16 tx_queued_limit;
2240+ u32 remain;
2241+ bool is_mu;
2242+
2243+ if (!td->tx_pending)
2244+ return;
2245+
2246+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2247+ tx_queued_limit = 100;
2248+
2249+ if (!td->aid) {
2250+ qid = skb_get_queue_mapping(td->tx_skb);
2251+ q = phy->q_tx[qid];
2252+ spin_lock_bh(&q->lock);
2253+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
2254+ td->tx_skb, q, tx_queued_limit);
2255+ spin_unlock_bh(&q->lock);
2256+
2257+ return;
2258+ }
2259+
2260+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2261+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2262+ qid = skb_get_queue_mapping(ed->tx_skb);
2263+ q = phy->q_tx[qid];
2264+
2265+ spin_lock_bh(&q->lock);
2266+
2267+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2268+ if (remain < tx_queued_limit)
2269+ tx_queued_limit = remain;
2270+
2271+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, tx_queued_limit);
2272+
2273+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2274+ td->cur_entry = list_next_entry(td->cur_entry, list);
2275
2276 spin_unlock_bh(&q->lock);
2277+
2278+ if (is_mu && td->tx_pending)
2279+ mt76_worker_schedule(&phy->dev->tx_worker);
2280 }
2281
2282 static u32
2283@@ -87,15 +126,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
2284 }
2285
2286 static void
2287-mt76_testmode_free_skb(struct mt76_phy *phy)
2288+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2289+{
2290+ if (!(*tx_skb))
2291+ return;
2292+
2293+ dev_kfree_skb(*tx_skb);
2294+ *tx_skb = NULL;
2295+}
2296+
2297+static void
2298+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2299 {
2300 struct mt76_testmode_data *td = &phy->test;
2301+ struct mt76_testmode_entry_data *ed = &td->ed;
2302+ struct mt76_wcid *wcid;
2303+
2304+ mt76_testmode_free_skb(&ed->tx_skb);
2305
2306- dev_kfree_skb(td->tx_skb);
2307- td->tx_skb = NULL;
2308+ mt76_tm_for_each_entry(phy, wcid, ed)
2309+ mt76_testmode_free_skb(&ed->tx_skb);
2310 }
2311
2312-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2313+static int
2314+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2315+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2316 {
2317 #define MT_TXP_MAX_LEN 4095
2318 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
2319@@ -117,7 +172,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2320 nfrags = len / MT_TXP_MAX_LEN;
2321 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2322
2323- if (len > IEEE80211_MAX_FRAME_LEN)
2324+ if (len > IEEE80211_MAX_FRAME_LEN ||
2325+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2326 fc |= IEEE80211_STYPE_QOS_DATA;
2327
2328 head = alloc_skb(head_len, GFP_KERNEL);
2329@@ -126,9 +182,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2330
2331 hdr = __skb_put_zero(head, head_len);
2332 hdr->frame_control = cpu_to_le16(fc);
2333- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2334- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2335- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2336+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2337+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2338+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2339 skb_set_queue_mapping(head, IEEE80211_AC_BE);
2340
2341 info = IEEE80211_SKB_CB(head);
2342@@ -152,7 +208,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2343
2344 frag = alloc_skb(frag_len, GFP_KERNEL);
2345 if (!frag) {
2346- mt76_testmode_free_skb(phy);
2347+ mt76_testmode_free_skb(tx_skb);
2348 dev_kfree_skb(head);
2349 return -ENOMEM;
2350 }
2351@@ -165,15 +221,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2352 frag_tail = &(*frag_tail)->next;
2353 }
2354
2355- mt76_testmode_free_skb(phy);
2356- td->tx_skb = head;
2357+ mt76_testmode_free_skb(tx_skb);
2358+ *tx_skb = head;
2359
2360 return 0;
2361 }
2362-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2363
2364-static int
2365-mt76_testmode_tx_init(struct mt76_phy *phy)
2366+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2367+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2368 {
2369 struct mt76_testmode_data *td = &phy->test;
2370 struct ieee80211_tx_info *info;
2371@@ -181,7 +236,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
2372 u8 max_nss = hweight8(phy->antenna_mask);
2373 int ret;
2374
2375- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2376+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2377 if (ret)
2378 return ret;
2379
2380@@ -191,7 +246,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
2381 if (td->tx_antenna_mask)
2382 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2383
2384- info = IEEE80211_SKB_CB(td->tx_skb);
2385+ info = IEEE80211_SKB_CB(*tx_skb);
2386 rate = &info->control.rates[0];
2387 rate->count = 1;
2388 rate->idx = td->tx_rate_idx;
2389@@ -263,6 +318,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
2390 out:
2391 return 0;
2392 }
2393+EXPORT_SYMBOL(mt76_testmode_init_skb);
2394+
2395+static int
2396+mt76_testmode_tx_init(struct mt76_phy *phy)
2397+{
2398+ struct mt76_testmode_entry_data *ed;
2399+ struct mt76_wcid *wcid;
2400+
2401+ mt76_tm_for_each_entry(phy, wcid, ed) {
2402+ int ret;
2403+
2404+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2405+ &ed->tx_skb, ed->addr);
2406+ if (ret)
2407+ return ret;
2408+ }
2409+
2410+ return 0;
2411+}
2412
2413 static void
2414 mt76_testmode_tx_start(struct mt76_phy *phy)
2415@@ -273,6 +347,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
2416 td->tx_queued = 0;
2417 td->tx_done = 0;
2418 td->tx_pending = td->tx_count;
2419+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2420+ td->tx_pending = 1;
2421+ if (td->entry_num) {
2422+ td->tx_pending *= td->entry_num;
2423+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2424+ struct mt76_wcid, list);
2425+ }
2426+
2427 mt76_worker_schedule(&dev->tx_worker);
2428 }
2429
2430@@ -291,7 +373,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
2431 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2432 MT76_TM_TIMEOUT * HZ);
2433
2434- mt76_testmode_free_skb(phy);
2435+ mt76_testmode_free_skb_all(phy);
2436 }
2437
2438 static inline void
2439@@ -322,6 +404,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
2440 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2441 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2442 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2443+
2444+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2445 }
2446
2447 static int
2448@@ -331,8 +415,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
2449 struct mt76_dev *dev = phy->dev;
2450 int err;
2451
2452- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2453+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2454+ /* MU needs to clean the hwq to get the tx free done event */
2455+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2456+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2457 mt76_testmode_tx_stop(phy);
2458+ }
2459
2460 if (state == MT76_TM_STATE_TX_FRAMES) {
2461 err = mt76_testmode_tx_init(phy);
2462@@ -402,6 +490,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
2463 return 0;
2464 }
2465
2466+static int
2467+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2468+{
2469+ struct mt76_dev *dev = phy->dev;
2470+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2471+ u32 offset = 0;
2472+ int err = -EINVAL;
2473+
2474+ if (!dev->test_ops->set_eeprom)
2475+ return -EOPNOTSUPP;
2476+
2477+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2478+ 0, MT76_TM_EEPROM_ACTION_MAX))
2479+ goto out;
2480+
2481+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2482+ struct nlattr *cur;
2483+ int rem, idx = 0;
2484+
2485+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2486+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2487+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2488+ goto out;
2489+
2490+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2491+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2492+ goto out;
2493+
2494+ val[idx++] = nla_get_u8(cur);
2495+ }
2496+ }
2497+
2498+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2499+
2500+out:
2501+ return err;
2502+}
2503+
2504 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2505 void *data, int len)
2506 {
2507@@ -425,6 +551,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2508
2509 mutex_lock(&dev->mutex);
2510
2511+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2512+ err = mt76_testmode_set_eeprom(phy, tb);
2513+ goto out;
2514+ }
2515+
2516 if (tb[MT76_TM_ATTR_RESET]) {
2517 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2518 memset(td, 0, sizeof(*td));
2519@@ -452,7 +583,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2520 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2521 &td->tx_duty_cycle, 0, 99) ||
2522 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2523- &td->tx_power_control, 0, 1))
2524+ &td->tx_power_control, 0, 1) ||
2525+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2526+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2527+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2528 goto out;
2529
2530 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
2531@@ -484,8 +618,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2532
2533 if (tb[MT76_TM_ATTR_TX_POWER]) {
2534 struct nlattr *cur;
2535- int idx = 0;
2536- int rem;
2537+ int rem, idx = 0;
2538
2539 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2540 if (nla_len(cur) != 1 ||
2541@@ -505,11 +638,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2542 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2543 goto out;
2544
2545- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2546+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2547+ }
2548+ }
2549+
2550+ if (tb[MT76_TM_ATTR_CFG]) {
2551+ struct nlattr *cur;
2552+ int rem, idx = 0;
2553+
2554+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2555+ if (nla_len(cur) != 1 || idx >= 2)
2556+ goto out;
2557+
2558+ if (idx == 0)
2559+ td->cfg.type = nla_get_u8(cur);
2560+ else
2561+ td->cfg.enable = nla_get_u8(cur);
2562 idx++;
2563 }
2564 }
2565
2566+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2567+ struct nlattr *cur;
2568+ int rem, idx = 0;
2569+
2570+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2571+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2572+ 0, MT76_TM_TXBF_ACT_MAX))
2573+ goto out;
2574+
2575+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2576+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2577+ if (nla_len(cur) != 2 ||
2578+ idx >= ARRAY_SIZE(td->txbf_param))
2579+ goto out;
2580+
2581+ td->txbf_param[idx++] = nla_get_u16(cur);
2582+ }
2583+ }
2584+
2585 if (dev->test_ops->set_params) {
2586 err = dev->test_ops->set_params(phy, tb, state);
2587 if (err)
2588@@ -574,6 +741,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
2589 struct mt76_phy *phy = hw->priv;
2590 struct mt76_dev *dev = phy->dev;
2591 struct mt76_testmode_data *td = &phy->test;
2592+ struct mt76_testmode_entry_data *ed = &td->ed;
2593 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2594 int err = 0;
2595 void *a;
2596@@ -606,6 +774,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
2597 goto out;
2598 }
2599
2600+ if (tb[MT76_TM_ATTR_AID]) {
2601+ struct mt76_wcid *wcid;
2602+ u8 aid;
2603+
2604+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2605+ if (err)
2606+ goto out;
2607+
2608+ mt76_tm_for_each_entry(phy, wcid, ed)
2609+ if (ed->aid == aid)
2610+ ed = mt76_testmode_entry_data(phy, wcid);
2611+ }
2612+
2613 mt76_testmode_init_defaults(phy);
2614
2615 err = -EMSGSIZE;
2616@@ -618,12 +799,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
2617 goto out;
2618
2619 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2620- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2621 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2622- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2623- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2624 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2625- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2626 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2627 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2628 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
2629@@ -643,6 +820,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
2630 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2631 goto out;
2632
2633+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2634+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2635+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2636+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2637+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2638+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2639+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2640+ goto out;
2641+
2642 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2643 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2644 if (!a)
2645diff --git a/testmode.h b/testmode.h
2646index 89613266..57949f2b 100644
2647--- a/testmode.h
2648+++ b/testmode.h
2649@@ -6,6 +6,8 @@
2650 #define __MT76_TESTMODE_H
2651
2652 #define MT76_TM_TIMEOUT 10
2653+#define MT76_TM_MAX_ENTRY_NUM 16
2654+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2655
2656 /**
2657 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2658@@ -47,6 +49,15 @@
2659 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2660 *
2661 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2662+ *
2663+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2664+ * (u8, see &enum mt76_testmode_eeprom_action)
2665+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2666+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2667+ * (nested, u8 attrs)
2668+ *
2669+ * @MT76_TM_ATTR_CFG: configure testmode rf feature (nested, see &enum mt76_testmode_cfg)
2670+ *
2671 */
2672 enum mt76_testmode_attr {
2673 MT76_TM_ATTR_UNSPEC,
2674@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2675 MT76_TM_ATTR_DRV_DATA,
2676
2677 MT76_TM_ATTR_MAC_ADDRS,
2678+ MT76_TM_ATTR_AID,
2679+ MT76_TM_ATTR_RU_ALLOC,
2680+ MT76_TM_ATTR_RU_IDX,
2681+
2682+ MT76_TM_ATTR_EEPROM_ACTION,
2683+ MT76_TM_ATTR_EEPROM_OFFSET,
2684+ MT76_TM_ATTR_EEPROM_VAL,
2685+
2686+ MT76_TM_ATTR_CFG,
2687+ MT76_TM_ATTR_TXBF_ACT,
2688+ MT76_TM_ATTR_TXBF_PARAM,
2689
2690 /* keep last */
2691 NUM_MT76_TM_ATTRS,
2692@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2693
2694 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2695
2696+/**
2697+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2698+ *
2699+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values to a specific
2700+ * eeprom data block
2701+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2702+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2703+ */
2704+enum mt76_testmode_eeprom_action {
2705+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2706+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2707+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2708+
2709+ /* keep last */
2710+ NUM_MT76_TM_EEPROM_ACTION,
2711+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2712+};
2713+
2714+/**
2715+ * enum mt76_testmode_cfg - rf feature configuration type
2716+ *
2717+ * @MT76_TM_CFG_TSSI: transmit signal strength indication (TSSI)
2718+ * @MT76_TM_CFG_DPD: digital pre-distortion (DPD)
2719+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate tx power offset
2720+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
2721+ */
2722+enum mt76_testmode_cfg {
2723+ MT76_TM_CFG_TSSI,
2724+ MT76_TM_CFG_DPD,
2725+ MT76_TM_CFG_RATE_POWER_OFFSET,
2726+ MT76_TM_CFG_THERMAL_COMP,
2727+
2728+ /* keep last */
2729+ NUM_MT76_TM_CFG,
2730+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2731+};
2732+
2733+enum mt76_testmode_txbf_act {
2734+ MT76_TM_TXBF_ACT_INIT,
2735+ MT76_TM_TXBF_ACT_UPDATE_CH,
2736+ MT76_TM_TXBF_ACT_PHASE_COMP,
2737+ MT76_TM_TXBF_ACT_TX_PREP,
2738+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2739+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2740+ MT76_TM_TXBF_ACT_PHASE_CAL,
2741+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2742+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2743+
2744+ /* keep last */
2745+ NUM_MT76_TM_TXBF_ACT,
2746+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2747+};
2748+
2749 #endif
2750diff --git a/tools/fields.c b/tools/fields.c
2751index e3f69089..6e36ab27 100644
2752--- a/tools/fields.c
2753+++ b/tools/fields.c
2754@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2755 [MT76_TM_STATE_IDLE] = "idle",
2756 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2757 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2758+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2759 };
2760
2761 static const char * const testmode_tx_mode[] = {
2762@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2763 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2764 }
2765
2766+static bool parse_mac(const struct tm_field *field, int idx,
2767+ struct nl_msg *msg, const char *val)
2768+{
2769+#define ETH_ALEN 6
2770+ bool ret = true;
2771+ char *str, *cur, *ap;
2772+ void *a;
2773+
2774+ ap = str = strdup(val);
2775+
2776+ a = nla_nest_start(msg, idx);
2777+
2778+ idx = 0;
2779+ while ((cur = strsep(&ap, ",")) != NULL) {
2780+ unsigned char addr[ETH_ALEN];
2781+ char *val, *tmp = cur;
2782+ int i = 0;
2783+
2784+ while ((val = strsep(&tmp, ":")) != NULL) {
2785+ if (i >= ETH_ALEN)
2786+ break;
2787+
2788+ addr[i++] = strtoul(val, NULL, 16);
2789+ }
2790+
2791+ nla_put(msg, idx, ETH_ALEN, addr);
2792+
2793+ idx++;
2794+ }
2795+
2796+ nla_nest_end(msg, a);
2797+
2798+ free(str);
2799+
2800+ return ret;
2801+}
2802+
2803+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2804+{
2805+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2806+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2807+ unsigned char addr[3][6];
2808+ struct nlattr *cur;
2809+ int idx = 0;
2810+ int rem;
2811+
2812+ nla_for_each_nested(cur, attr, rem) {
2813+ if (nla_len(cur) != 6)
2814+ continue;
2815+ memcpy(addr[idx++], nla_data(cur), 6);
2816+ }
2817+
2818+ printf("" MACSTR "," MACSTR "," MACSTR "",
2819+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2820+
2821+ return;
2822+}
2823
2824 #define FIELD_GENERIC(_field, _name, ...) \
2825 [FIELD_NAME(_field)] = { \
2826@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2827 ##__VA_ARGS__ \
2828 )
2829
2830+#define FIELD_MAC(_field, _name) \
2831+ [FIELD_NAME(_field)] = { \
2832+ .name = _name, \
2833+ .parse = parse_mac, \
2834+ .print = print_mac \
2835+ }
2836+
2837 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2838 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2839 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2840@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2841 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2842 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2843 FIELD(u8, TX_LTF, "tx_ltf"),
2844+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2845+ FIELD(u32, TX_IPG, "tx_ipg"),
2846+ FIELD(u32, TX_TIME, "tx_time"),
2847 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2848 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2849 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2850+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2851 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2852+ FIELD(u8, AID, "aid"),
2853+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2854+ FIELD(u8, RU_IDX, "ru_idx"),
2855+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2856 FIELD_NESTED_RO(STATS, stats, "",
2857 .print_extra = print_extra_stats),
2858 };
2859@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2860 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2861 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2862 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2863+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2864+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2865+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2866 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2867 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2868+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2869 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2870+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2871+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2872+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2873 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2874 };
2875
2876diff --git a/tx.c b/tx.c
2877index 02067edc..0457c3eb 100644
2878--- a/tx.c
2879+++ b/tx.c
2880@@ -245,8 +245,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
2881 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2882 struct mt76_phy *phy = hw->priv;
2883
2884- if (skb == phy->test.tx_skb)
2885- phy->test.tx_done++;
2886+ phy->test.tx_done++;
2887 if (phy->test.tx_queued == phy->test.tx_done)
2888 wake_up(&dev->tx_wait);
2889
2890--
28912.25.1
2892