From ee15bb2a93f96ebe3169c9bbda9855da4314d3b4 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1111/1124] mt76: testmode: additional supports

Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 ++++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   35 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   10 +-
 mt7915/mcu.h      |   28 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   14 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1171 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++++++++
 testmode.c        |  275 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   80 ++++
 tx.c              |    3 +-
 19 files changed, 1961 insertions(+), 146 deletions(-)

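Note on the per-AID testmode entries added below (a usage sketch only, not part of the diff itself): the mt76.h hunk introduces struct mt76_testmode_entry_data, keeps configured entries on phy->test.tm_entry_list, and provides an iterator macro for drivers. A walk over the entries would look roughly like the following, assuming "phy" is a struct mt76_phy * and CONFIG_NL80211_TESTMODE is enabled; the pr_debug() call is illustrative and not taken from the patch:

	struct mt76_testmode_entry_data *ed;
	struct mt76_wcid *wcid;

	/* visit each configured entry, or the global wcid when no AID is set */
	mt76_tm_for_each_entry(phy, wcid, ed)
		pr_debug("tm entry: aid=%u ru_idx=%u mpdu_len=%u\n",
			 ed->aid, ed->ru_idx, ed->tx_mpdu_len);
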
29diff --git a/dma.c b/dma.c
developer20747c12022-09-16 14:09:40 +080030index 40cb9109..8ea09e6e 100644
developer4c6b6002022-05-30 16:36:44 +080031--- a/dma.c
32+++ b/dma.c
33@@ -426,8 +426,7 @@ free:
34 if (mt76_is_testmode_skb(dev, skb, &hw)) {
35 struct mt76_phy *phy = hw->priv;
36
37- if (tx_info.skb == phy->test.tx_skb)
38- phy->test.tx_done--;
39+ phy->test.tx_done--;
40 }
41 #endif
42
43diff --git a/mac80211.c b/mac80211.c
developer20747c12022-09-16 14:09:40 +080044index c5ef5940..f34a93e0 100644
developer4c6b6002022-05-30 16:36:44 +080045--- a/mac80211.c
46+++ b/mac80211.c
47@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 CHAN5G(60, 5300),
49 CHAN5G(64, 5320),
50
51+ CHAN5G(68, 5340),
52+ CHAN5G(80, 5400),
53+ CHAN5G(84, 5420),
54+ CHAN5G(88, 5440),
55+ CHAN5G(92, 5460),
56+ CHAN5G(96, 5480),
57+
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
61@@ -75,6 +82,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
62 CHAN5G(165, 5825),
63 CHAN5G(169, 5845),
64 CHAN5G(173, 5865),
65+
66+ CHAN5G(184, 4920),
67+ CHAN5G(188, 4940),
68+ CHAN5G(192, 4960),
69+ CHAN5G(196, 4980),
70 };
71
72 static const struct ieee80211_channel mt76_channels_6ghz[] = {
73diff --git a/mt76.h b/mt76.h
developer20747c12022-09-16 14:09:40 +080074index b5453d35..0e4eea16 100644
developer4c6b6002022-05-30 16:36:44 +080075--- a/mt76.h
76+++ b/mt76.h
developer20747c12022-09-16 14:09:40 +080077@@ -636,6 +636,21 @@ struct mt76_testmode_ops {
developer4c6b6002022-05-30 16:36:44 +080078 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
79 enum mt76_testmode_state new_state);
80 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
81+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
82+};
83+
84+struct mt76_testmode_entry_data {
85+ struct sk_buff *tx_skb;
86+
87+ u16 tx_mpdu_len;
88+ u8 tx_rate_idx;
89+ u8 tx_rate_nss;
90+ u8 tx_rate_ldpc;
91+
92+ u8 addr[3][ETH_ALEN];
93+ u8 aid;
94+ u8 ru_alloc;
95+ u8 ru_idx;
96 };
97
98 #define MT_TM_FW_RX_COUNT BIT(0)
developer20747c12022-09-16 14:09:40 +080099@@ -644,16 +659,11 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800100 enum mt76_testmode_state state;
101
102 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
103- struct sk_buff *tx_skb;
104
105 u32 tx_count;
106- u16 tx_mpdu_len;
107
108 u8 tx_rate_mode;
109- u8 tx_rate_idx;
110- u8 tx_rate_nss;
111 u8 tx_rate_sgi;
112- u8 tx_rate_ldpc;
113 u8 tx_rate_stbc;
114 u8 tx_ltf;
115
developer20747c12022-09-16 14:09:40 +0800116@@ -669,10 +679,37 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800117 u8 tx_power[4];
118 u8 tx_power_control;
119
120- u8 addr[3][ETH_ALEN];
121+ struct list_head tm_entry_list;
122+ struct mt76_wcid *cur_entry;
123+ u8 entry_num;
124+ union {
125+ struct mt76_testmode_entry_data ed;
126+ struct {
127+ /* must stay in sync with struct mt76_testmode_entry_data */
128+ struct sk_buff *tx_skb;
129+
130+ u16 tx_mpdu_len;
131+ u8 tx_rate_idx;
132+ u8 tx_rate_nss;
133+ u8 tx_rate_ldpc;
134+
135+ u8 addr[3][ETH_ALEN];
136+ u8 aid;
137+ u8 ru_alloc;
138+ u8 ru_idx;
139+ };
140+ };
141
142 u8 flag;
143
144+ struct {
145+ u8 type;
146+ u8 enable;
147+ } cfg;
148+
149+ u8 txbf_act;
150+ u16 txbf_param[8];
151+
152 u32 tx_pending;
153 u32 tx_queued;
154 u16 tx_queued_limit;
developer20747c12022-09-16 14:09:40 +0800155@@ -1129,6 +1166,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +0800156 #endif
157 }
158
159+#ifdef CONFIG_NL80211_TESTMODE
160+static inline struct mt76_wcid *
161+mt76_testmode_first_entry(struct mt76_phy *phy)
162+{
163+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
164+ return &phy->dev->global_wcid;
165+
166+ return list_first_entry(&phy->test.tm_entry_list,
167+ typeof(struct mt76_wcid),
168+ list);
169+}
170+
171+static inline struct mt76_testmode_entry_data *
172+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
173+{
174+ if (!wcid)
175+ return NULL;
176+ if (wcid == &phy->dev->global_wcid)
177+ return &phy->test.ed;
178+
179+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
180+ phy->hw->sta_data_size);
181+}
182+
183+#define mt76_tm_for_each_entry(phy, wcid, ed) \
184+ for (wcid = mt76_testmode_first_entry(phy), \
185+ ed = mt76_testmode_entry_data(phy, wcid); \
186+ ((phy->test.aid && \
187+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
188+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
189+ wcid = list_next_entry(wcid, list), \
190+ ed = mt76_testmode_entry_data(phy, wcid))
191+#endif
192+
193+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
194+ struct sk_buff *skb)
195+{
196+#ifdef CONFIG_NL80211_TESTMODE
197+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
198+ struct mt76_wcid *wcid;
199+
200+ if (skb == ed->tx_skb)
201+ return true;
202+
203+ mt76_tm_for_each_entry(phy, wcid, ed)
204+ if (skb == ed->tx_skb)
205+ return true;
206+ return false;
207+#else
208+ return false;
209+#endif
210+}
211+
212 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
213 struct sk_buff *skb,
214 struct ieee80211_hw **hw)
developer20747c12022-09-16 14:09:40 +0800215@@ -1139,7 +1229,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
developer1d9fede2022-08-29 15:24:07 +0800216 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
217 struct mt76_phy *phy = dev->phys[i];
218
219- if (phy && skb == phy->test.tx_skb) {
220+ if (phy && mt76_testmode_enabled(phy) &&
221+ __mt76_is_testmode_skb(phy, skb)) {
222 *hw = dev->phys[i]->hw;
223 return true;
224 }
developer20747c12022-09-16 14:09:40 +0800225@@ -1240,7 +1331,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +0800226 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
227 struct netlink_callback *cb, void *data, int len);
228 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
229-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
230+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
231+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
232
233 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
234 {
235diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
developer20747c12022-09-16 14:09:40 +0800236index c1ed9fef..36a2914e 100644
developer4c6b6002022-05-30 16:36:44 +0800237--- a/mt76_connac_mcu.c
238+++ b/mt76_connac_mcu.c
developer20747c12022-09-16 14:09:40 +0800239@@ -393,6 +393,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
developer4c6b6002022-05-30 16:36:44 +0800240 switch (vif->type) {
241 case NL80211_IFTYPE_MESH_POINT:
242 case NL80211_IFTYPE_AP:
243+ case NL80211_IFTYPE_MONITOR:
244 if (vif->p2p)
245 conn_type = CONNECTION_P2P_GC;
246 else
developer20747c12022-09-16 14:09:40 +0800247@@ -574,6 +575,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800248 rx->rca2 = 1;
249 rx->rv = 1;
250
251+ if (vif->type == NL80211_IFTYPE_MONITOR)
252+ rx->rca1 = 0;
253+
254 if (!is_connac_v1(dev))
255 return;
256
257diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
developer20747c12022-09-16 14:09:40 +0800258index 9fb75e6e..9b9878ac 100644
developer4c6b6002022-05-30 16:36:44 +0800259--- a/mt76_connac_mcu.h
260+++ b/mt76_connac_mcu.h
developer20747c12022-09-16 14:09:40 +0800261@@ -975,6 +975,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800262 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
263 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
264 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
265+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
266 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
267 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
268 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
developer20747c12022-09-16 14:09:40 +0800269@@ -1152,6 +1153,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800270 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
271 /* for vendor csi and air monitor */
272 MCU_EXT_CMD_SMESH_CTRL = 0xae,
273+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
274 MCU_EXT_CMD_CERT_CFG = 0xb7,
275 MCU_EXT_CMD_CSI_CTRL = 0xc2,
276 };
277diff --git a/mt7915/init.c b/mt7915/init.c
developer20747c12022-09-16 14:09:40 +0800278index 141c5ad8..538ff5c3 100644
developer4c6b6002022-05-30 16:36:44 +0800279--- a/mt7915/init.c
280+++ b/mt7915/init.c
developerf64861f2022-06-22 11:44:53 +0800281@@ -576,7 +576,7 @@ static void mt7915_init_work(struct work_struct *work)
developer4c6b6002022-05-30 16:36:44 +0800282 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
283 init_work);
284
285- mt7915_mcu_set_eeprom(dev);
286+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
287 mt7915_mac_init(dev);
288 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
289 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
290diff --git a/mt7915/mac.c b/mt7915/mac.c
developer20747c12022-09-16 14:09:40 +0800291index d44af409..471d533b 100644
developer4c6b6002022-05-30 16:36:44 +0800292--- a/mt7915/mac.c
293+++ b/mt7915/mac.c
developerf40484f2022-08-25 15:33:33 +0800294@@ -565,16 +565,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800295 {
296 #ifdef CONFIG_NL80211_TESTMODE
297 struct mt76_testmode_data *td = &phy->mt76->test;
298+ struct mt76_testmode_entry_data *ed;
299+ struct mt76_wcid *wcid;
300 const struct ieee80211_rate *r;
301- u8 bw, mode, nss = td->tx_rate_nss;
302- u8 rate_idx = td->tx_rate_idx;
303+ u8 bw, mode, nss, rate_idx, ldpc;
304 u16 rateval = 0;
305 u32 val;
306 bool cck = false;
307 int band;
308
309- if (skb != phy->mt76->test.tx_skb)
310+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
311+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
312+ phy->test.spe_idx));
313+
314+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
315+ txwi[1] |= cpu_to_le32(BIT(18));
316+ txwi[2] = 0;
317+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
318+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
319+
developerf40484f2022-08-25 15:33:33 +0800320 return;
developer4c6b6002022-05-30 16:36:44 +0800321+ }
322+
323+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
324+ if (ed->tx_skb == skb)
325+ break;
326+
327+ if (!ed)
developerf40484f2022-08-25 15:33:33 +0800328+ return;
329+
developer4c6b6002022-05-30 16:36:44 +0800330+ nss = ed->tx_rate_nss;
331+ rate_idx = ed->tx_rate_idx;
332+ ldpc = ed->tx_rate_ldpc;
developerf40484f2022-08-25 15:33:33 +0800333
developer4c6b6002022-05-30 16:36:44 +0800334 switch (td->tx_rate_mode) {
335 case MT76_TM_TX_MODE_HT:
developerf40484f2022-08-25 15:33:33 +0800336@@ -664,13 +686,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800337 if (mode >= MT_PHY_TYPE_HE_SU)
338 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
339
340- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
341+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
342 val |= MT_TXD6_LDPC;
343
developerf40484f2022-08-25 15:33:33 +0800344 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
developer4c6b6002022-05-30 16:36:44 +0800345+ if (phy->test.bf_en)
346+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
347+
348 txwi[6] |= cpu_to_le32(val);
349- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
350- phy->test.spe_idx));
351 #endif
352 }
353
354diff --git a/mt7915/main.c b/mt7915/main.c
developer20747c12022-09-16 14:09:40 +0800355index 5b25604e..ff9fd19d 100644
developer4c6b6002022-05-30 16:36:44 +0800356--- a/mt7915/main.c
357+++ b/mt7915/main.c
developer4721e252022-06-21 16:41:28 +0800358@@ -224,7 +224,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
developer4c6b6002022-05-30 16:36:44 +0800359 mvif->phy = phy;
360 mvif->mt76.band_idx = phy->band_idx;
361
362- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
363+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
364 if (ext_phy)
365 mvif->mt76.wmm_idx += 2;
366
367diff --git a/mt7915/mcu.c b/mt7915/mcu.c
developer20747c12022-09-16 14:09:40 +0800368index c8c8be49..ff0e91e5 100644
developer4c6b6002022-05-30 16:36:44 +0800369--- a/mt7915/mcu.c
370+++ b/mt7915/mcu.c
developerf64861f2022-06-22 11:44:53 +0800371@@ -434,6 +434,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800372 case MCU_EXT_EVENT_BCC_NOTIFY:
373 mt7915_mcu_rx_bcc_notify(dev, skb);
374 break;
375+#ifdef CONFIG_NL80211_TESTMODE
376+ case MCU_EXT_EVENT_BF_STATUS_READ:
377+ mt7915_tm_txbf_status_read(dev, skb);
378+ break;
379+#endif
380 default:
381 break;
382 }
developerf64861f2022-06-22 11:44:53 +0800383@@ -465,6 +470,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800384 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
385 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
386 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
387+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
388 !rxd->seq)
389 mt7915_mcu_rx_unsolicited_event(dev, skb);
390 else
developer20747c12022-09-16 14:09:40 +0800391@@ -2826,14 +2832,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
developer4c6b6002022-05-30 16:36:44 +0800392 return 0;
393 }
394
395-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
396+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
397 {
398 struct mt7915_mcu_eeprom req = {
399 .buffer_mode = EE_MODE_EFUSE,
400 .format = EE_FORMAT_WHOLE,
401 };
402
403- if (dev->flash_mode)
404+ if (flash_mode)
405 return mt7915_mcu_set_eeprom_flash(dev);
406
407 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
408diff --git a/mt7915/mcu.h b/mt7915/mcu.h
developer20747c12022-09-16 14:09:40 +0800409index 0a77ad0d..ad85e56c 100644
developer4c6b6002022-05-30 16:36:44 +0800410--- a/mt7915/mcu.h
411+++ b/mt7915/mcu.h
developerf64861f2022-06-22 11:44:53 +0800412@@ -8,10 +8,15 @@
developer4c6b6002022-05-30 16:36:44 +0800413
414 enum {
415 MCU_ATE_SET_TRX = 0x1,
416+ MCU_ATE_SET_TSSI = 0x5,
417+ MCU_ATE_SET_DPD = 0x6,
418+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
419+ MCU_ATE_SET_THERMAL_COMP = 0x8,
420 MCU_ATE_SET_FREQ_OFFSET = 0xa,
421 MCU_ATE_SET_PHY_COUNT = 0x11,
422 MCU_ATE_SET_SLOT_TIME = 0x13,
423 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
424+ MCU_ATE_SET_MU_RX_AID = 0x1e,
425 };
426
developerf64861f2022-06-22 11:44:53 +0800427 struct mt7915_mcu_thermal_ctrl {
developerd59e4772022-07-14 13:48:49 +0800428@@ -432,6 +437,12 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800429
430 enum {
431 MT_BF_SOUNDING_ON = 1,
432+ MT_BF_DATA_PACKET_APPLY = 2,
433+ MT_BF_PFMU_TAG_READ = 5,
434+ MT_BF_PFMU_TAG_WRITE = 6,
435+ MT_BF_PHASE_CAL = 14,
436+ MT_BF_IBF_PHASE_COMP = 15,
437+ MT_BF_PROFILE_WRITE_ALL = 17,
438 MT_BF_TYPE_UPDATE = 20,
439 MT_BF_MODULE_UPDATE = 25
440 };
developerd59e4772022-07-14 13:48:49 +0800441@@ -665,10 +676,19 @@ struct mt7915_muru {
developer4c6b6002022-05-30 16:36:44 +0800442 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
443
developerf64861f2022-06-22 11:44:53 +0800444 /* Common Config */
developer4c6b6002022-05-30 16:36:44 +0800445-#define MURU_COMM_PPDU_FMT BIT(0)
446-#define MURU_COMM_SCH_TYPE BIT(1)
447-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
developer4c6b6002022-05-30 16:36:44 +0800448-/* DL&UL User config*/
developer4c6b6002022-05-30 16:36:44 +0800449+/* #define MURU_COMM_PPDU_FMT BIT(0) */
450+/* #define MURU_COMM_SCH_TYPE BIT(1) */
451+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
developer4721e252022-06-21 16:41:28 +0800452+#define MURU_COMM_PPDU_FMT BIT(0)
453+#define MURU_COMM_SCH_TYPE BIT(1)
454+#define MURU_COMM_BAND BIT(2)
455+#define MURU_COMM_WMM BIT(3)
456+#define MURU_COMM_SPE_IDX BIT(4)
457+#define MURU_COMM_PROC_TYPE BIT(5)
458+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
459+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
developer4c6b6002022-05-30 16:36:44 +0800460+
461+/* DL&UL User config */
462 #define MURU_USER_CNT BIT(4)
463
464 enum {
465diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer20747c12022-09-16 14:09:40 +0800466index fd2f70e3..088c9f3e 100644
developer4c6b6002022-05-30 16:36:44 +0800467--- a/mt7915/mmio.c
468+++ b/mt7915/mmio.c
developerf64861f2022-06-22 11:44:53 +0800469@@ -76,6 +76,7 @@ static const u32 mt7915_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800470 [ARB_DRNGR0] = 0x194,
471 [ARB_SCR] = 0x080,
472 [RMAC_MIB_AIRTIME14] = 0x3b8,
473+ [AGG_AALCR0] = 0x048,
474 [AGG_AWSCR0] = 0x05c,
475 [AGG_PCR0] = 0x06c,
476 [AGG_ACR0] = 0x084,
developer20747c12022-09-16 14:09:40 +0800477@@ -151,6 +152,7 @@ static const u32 mt7916_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800478 [ARB_DRNGR0] = 0x1e0,
479 [ARB_SCR] = 0x000,
480 [RMAC_MIB_AIRTIME14] = 0x0398,
481+ [AGG_AALCR0] = 0x028,
482 [AGG_AWSCR0] = 0x030,
483 [AGG_PCR0] = 0x040,
484 [AGG_ACR0] = 0x054,
485diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developer20747c12022-09-16 14:09:40 +0800486index e8feba36..798c9337 100644
developer4c6b6002022-05-30 16:36:44 +0800487--- a/mt7915/mt7915.h
488+++ b/mt7915/mt7915.h
developer20747c12022-09-16 14:09:40 +0800489@@ -301,6 +301,9 @@ struct mt7915_phy {
developer4c6b6002022-05-30 16:36:44 +0800490 u8 last_snr;
491
492 u8 spe_idx;
493+
494+ bool bf_en;
495+ bool bf_ever_en;
496 } test;
497 #endif
498
developer20747c12022-09-16 14:09:40 +0800499@@ -392,6 +395,14 @@ struct mt7915_dev {
developer4c6b6002022-05-30 16:36:44 +0800500 void __iomem *dcm;
501 void __iomem *sku;
502
503+#ifdef CONFIG_NL80211_TESTMODE
504+ struct {
505+ void *txbf_phase_cal;
506+ void *txbf_pfmu_data;
507+ void *txbf_pfmu_tag;
508+ } test;
509+#endif
510+
511 #ifdef MTK_DEBUG
512 u16 wlan_idx;
513 struct {
developer20747c12022-09-16 14:09:40 +0800514@@ -570,7 +581,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800515 struct ieee80211_vif *vif,
516 struct ieee80211_sta *sta,
517 void *data, u32 field);
518-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
519+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
520 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
521 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
522 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
developer20747c12022-09-16 14:09:40 +0800523@@ -603,6 +614,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
developer4c6b6002022-05-30 16:36:44 +0800524 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
525 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
526 void mt7915_mcu_exit(struct mt7915_dev *dev);
527+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
528
529 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
530 {
531diff --git a/mt7915/regs.h b/mt7915/regs.h
developer20747c12022-09-16 14:09:40 +0800532index e2b0ff7d..d7f71033 100644
developer4c6b6002022-05-30 16:36:44 +0800533--- a/mt7915/regs.h
534+++ b/mt7915/regs.h
developer1d9fede2022-08-29 15:24:07 +0800535@@ -45,6 +45,7 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800536 ARB_DRNGR0,
537 ARB_SCR,
538 RMAC_MIB_AIRTIME14,
539+ AGG_AALCR0,
540 AGG_AWSCR0,
541 AGG_PCR0,
542 AGG_ACR0,
developer20747c12022-09-16 14:09:40 +0800543@@ -462,6 +463,8 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800544 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
545 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
546
547+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
548+ (_n) * 4))
549 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
550 (_n) * 4))
551 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
552diff --git a/mt7915/testmode.c b/mt7915/testmode.c
developer20747c12022-09-16 14:09:40 +0800553index 35345d35..bc4cd809 100644
developer4c6b6002022-05-30 16:36:44 +0800554--- a/mt7915/testmode.c
555+++ b/mt7915/testmode.c
556@@ -9,6 +9,9 @@
557 enum {
558 TM_CHANGED_TXPOWER,
559 TM_CHANGED_FREQ_OFFSET,
560+ TM_CHANGED_AID,
561+ TM_CHANGED_CFG,
562+ TM_CHANGED_TXBF_ACT,
563
564 /* must be last */
565 NUM_TM_CHANGED
566@@ -17,6 +20,9 @@ enum {
567 static const u8 tm_change_map[] = {
568 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
569 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
570+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
571+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
572+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
573 };
574
575 struct reg_band {
developerc6f56bb2022-06-14 18:36:30 +0800576@@ -33,6 +39,38 @@ struct reg_band {
developer4c6b6002022-05-30 16:36:44 +0800577 #define TM_REG_MAX_ID 20
578 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
579
developerc6f56bb2022-06-14 18:36:30 +0800580+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
581+
developer4c6b6002022-05-30 16:36:44 +0800582+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
583+{
584+ static const u8 width_to_bw[] = {
585+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
586+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
587+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
588+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
589+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
590+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
591+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
592+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
593+ };
594+
595+ if (width >= ARRAY_SIZE(width_to_bw))
596+ return 0;
597+
598+ return width_to_bw[width];
599+}
600+
601+static void
602+mt7915_tm_update_channel(struct mt7915_phy *phy)
603+{
604+ mutex_unlock(&phy->dev->mt76.mutex);
605+ mt7915_set_channel(phy);
606+ mutex_lock(&phy->dev->mt76.mutex);
607+
608+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
developerc6f56bb2022-06-14 18:36:30 +0800609+
610+ mt7915_tm_update_entry(phy);
developer4c6b6002022-05-30 16:36:44 +0800611+}
612
613 static int
614 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
developerc6f56bb2022-06-14 18:36:30 +0800615@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
developer4c6b6002022-05-30 16:36:44 +0800616 }
617
618 static int
619-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
620+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
621 {
622+ struct mt76_testmode_entry_data *ed;
623+ struct mt76_wcid *wcid;
624 struct mt7915_dev *dev = phy->dev;
625 struct mt7915_tm_cmd req = {
626 .testmode_en = 1,
627 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
628- .param.clean.wcid = wcid,
629 .param.clean.band = phy != &dev->phy,
630 };
631
632- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
633- sizeof(req), false);
634+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
635+ int ret;
636+
637+ req.param.clean.wcid = wcid->idx;
638+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
639+ &req, sizeof(req), false);
640+ if (ret)
641+ return ret;
642+ }
643+
644+ return 0;
645 }
646
647 static int
developerf64861f2022-06-22 11:44:53 +0800648@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
developer4c6b6002022-05-30 16:36:44 +0800649 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
650 }
651
652+static int
653+mt7915_tm_set_cfg(struct mt7915_phy *phy)
654+{
655+ static const u8 cfg_cmd[] = {
656+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
657+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
658+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
659+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
660+ };
661+ struct mt76_testmode_data *td = &phy->mt76->test;
662+ struct mt7915_dev *dev = phy->dev;
663+ struct mt7915_tm_cmd req = {
664+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
665+ .param_idx = cfg_cmd[td->cfg.type],
666+ .param.cfg.enable = td->cfg.enable,
667+ .param.cfg.band = phy->band_idx,
668+ };
669+
670+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
671+ sizeof(req), false);
672+}
673+
674+static int
675+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
676+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
677+ u8 nc, bool ebf)
678+{
679+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
680+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
681+ struct mt7915_dev *dev = phy->dev;
682+ struct sk_buff *skb;
683+ struct sta_rec_bf *bf;
684+ struct tlv *tlv;
685+ u8 ndp_rate;
686+
687+ if (nr == 1)
688+ ndp_rate = 8;
689+ else if (nr == 2)
690+ ndp_rate = 16;
691+ else
692+ ndp_rate = 24;
693+
694+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
695+ &msta->wcid);
696+ if (IS_ERR(skb))
697+ return PTR_ERR(skb);
698+
699+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
700+ bf = (struct sta_rec_bf *)tlv;
701+
702+ bf->pfmu = cpu_to_le16(pfmu_idx);
703+ bf->sounding_phy = 1;
704+ bf->bf_cap = ebf;
705+ bf->ncol = nc;
706+ bf->nrow = nr;
707+ bf->ndp_rate = ndp_rate;
708+ bf->ibf_timeout = 0xff;
709+ bf->tx_mode = MT_PHY_TYPE_HT;
710+
711+ if (ebf) {
712+ bf->mem[0].row = 0;
713+ bf->mem[1].row = 1;
714+ bf->mem[2].row = 2;
715+ bf->mem[3].row = 3;
716+ } else {
717+ bf->mem[0].row = 4;
718+ bf->mem[1].row = 5;
719+ bf->mem[2].row = 6;
720+ bf->mem[3].row = 7;
721+ }
722+
723+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
724+ MCU_EXT_CMD(STA_REC_UPDATE), true);
725+}
726+
727+static int
728+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
729+{
730+ struct mt76_testmode_data *td = &phy->mt76->test;
731+ struct mt76_testmode_entry_data *ed;
732+ struct ieee80211_sband_iftype_data *sdata;
733+ struct ieee80211_supported_band *sband;
734+ struct ieee80211_sta *sta;
735+ struct mt7915_sta *msta;
736+ int tid, ret;
737+
738+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
739+ return -EINVAL;
740+
741+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
742+ sizeof(*ed), GFP_KERNEL);
743+ if (!sta)
744+ return -ENOMEM;
745+
746+ msta = (struct mt7915_sta *)sta->drv_priv;
747+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
748+ memcpy(ed, &td->ed, sizeof(*ed));
749+
750+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
751+ sband = &phy->mt76->sband_5g.sband;
752+ sdata = phy->iftype[NL80211_BAND_5GHZ];
753+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
754+ sband = &phy->mt76->sband_6g.sband;
755+ sdata = phy->iftype[NL80211_BAND_6GHZ];
756+ } else {
757+ sband = &phy->mt76->sband_2g.sband;
758+ sdata = phy->iftype[NL80211_BAND_2GHZ];
759+ }
760+
761+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
762+ if (phy->test.bf_en) {
763+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
764+
765+ memcpy(sta->addr, addr, ETH_ALEN);
766+ }
767+
768+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
769+ memcpy(&sta->ht_cap, &sband->ht_cap, sizeof(sta->ht_cap));
770+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
771+ memcpy(&sta->vht_cap, &sband->vht_cap, sizeof(sta->vht_cap));
772+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
773+ memcpy(&sta->he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
774+ sizeof(sta->he_cap));
775+ sta->aid = aid;
776+ sta->wme = 1;
777+
778+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
779+ if (ret) {
780+ kfree(sta);
781+ return ret;
782+ }
783+
784+ /* prevent starting a tx ba session */
785+ for (tid = 0; tid < 8; tid++)
786+ set_bit(tid, &msta->ampdu_state);
787+
788+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
789+ td->entry_num++;
790+
791+ return 0;
792+}
793+
794+static void
795+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
796+{
797+ struct mt76_testmode_data *td = &phy->mt76->test;
798+ struct mt76_wcid *wcid, *tmp;
799+
800+ if (list_empty(&td->tm_entry_list))
801+ return;
802+
803+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
developerc6f56bb2022-06-14 18:36:30 +0800804+ struct mt76_testmode_entry_data *ed;
developer4c6b6002022-05-30 16:36:44 +0800805+ struct mt7915_dev *dev = phy->dev;
developerc6f56bb2022-06-14 18:36:30 +0800806+ struct ieee80211_sta *sta;
developer4c6b6002022-05-30 16:36:44 +0800807+
developerc6f56bb2022-06-14 18:36:30 +0800808+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
809+ if (aid && ed->aid != aid)
810+ continue;
811+
812+ sta = wcid_to_sta(wcid);
developer4c6b6002022-05-30 16:36:44 +0800813+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
814+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
815+
816+ list_del_init(&wcid->list);
817+ kfree(sta);
818+ phy->mt76->test.entry_num--;
819+ }
820+}
821+
822+static int
823+mt7915_tm_set_entry(struct mt7915_phy *phy)
824+{
825+ struct mt76_testmode_data *td = &phy->mt76->test;
826+ struct mt76_testmode_entry_data *ed;
827+ struct mt76_wcid *wcid;
828+
829+ if (!td->aid) {
830+ if (td->state > MT76_TM_STATE_IDLE)
831+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
832+ mt7915_tm_entry_remove(phy, td->aid);
833+ return 0;
834+ }
835+
836+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
837+ if (ed->aid == td->aid) {
838+ struct sk_buff *skb;
839+
840+ local_bh_disable();
841+ skb = ed->tx_skb;
842+ memcpy(ed, &td->ed, sizeof(*ed));
843+ ed->tx_skb = skb;
844+ local_bh_enable();
845+
846+ return 0;
847+ }
848+ }
849+
850+ return mt7915_tm_entry_add(phy, td->aid);
851+}
852+
developerc6f56bb2022-06-14 18:36:30 +0800853+static void
854+mt7915_tm_update_entry(struct mt7915_phy *phy)
855+{
856+ struct mt76_testmode_data *td = &phy->mt76->test;
857+ struct mt76_testmode_entry_data *ed, tmp;
858+ struct mt76_wcid *wcid, *last;
859+
860+ if (!td->aid || phy->test.bf_en)
861+ return;
862+
863+ memcpy(&tmp, &td->ed, sizeof(tmp));
864+ last = list_last_entry(&td->tm_entry_list,
865+ struct mt76_wcid, list);
866+
867+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
868+ memcpy(&td->ed, ed, sizeof(td->ed));
869+ mt7915_tm_entry_remove(phy, td->aid);
870+ mt7915_tm_entry_add(phy, td->aid);
871+ if (wcid == last)
872+ break;
873+ }
874+
875+ memcpy(&td->ed, &tmp, sizeof(td->ed));
876+}
877+
developer4c6b6002022-05-30 16:36:44 +0800878+static int
879+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
880+{
881+ struct mt76_testmode_data *td = &phy->mt76->test;
882+ struct mt7915_dev *dev = phy->dev;
883+ bool enable = val[0];
884+ void *phase_cal, *pfmu_data, *pfmu_tag;
885+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
886+
887+ if (!enable) {
888+ phy->test.bf_en = 0;
889+ return 0;
890+ }
891+
892+ if (!dev->test.txbf_phase_cal) {
893+ phase_cal = devm_kzalloc(dev->mt76.dev,
894+ sizeof(struct mt7915_tm_txbf_phase) *
895+ MAX_PHASE_GROUP_NUM,
896+ GFP_KERNEL);
897+ if (!phase_cal)
898+ return -ENOMEM;
899+
900+ dev->test.txbf_phase_cal = phase_cal;
901+ }
902+
903+ if (!dev->test.txbf_pfmu_data) {
904+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
905+ if (!pfmu_data)
906+ return -ENOMEM;
907+
908+ dev->test.txbf_pfmu_data = pfmu_data;
909+ }
910+
911+ if (!dev->test.txbf_pfmu_tag) {
912+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
913+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
914+ if (!pfmu_tag)
915+ return -ENOMEM;
916+
917+ dev->test.txbf_pfmu_tag = pfmu_tag;
918+ }
919+
920+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
921+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
922+
923+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
924+ td->tx_mpdu_len = 1024;
925+ td->tx_rate_sgi = 0;
926+ td->tx_ipg = 100;
927+ phy->test.bf_en = 1;
928+
929+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
930+}
931+
932+static int
933+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
934+{
935+ struct mt7915_dev *dev = phy->dev;
936+ struct {
937+ u8 category;
938+ u8 wlan_idx_lo;
939+ u8 bw;
940+ u8 jp_band;
941+ u8 dbdc_idx;
942+ bool read_from_e2p;
943+ bool disable;
944+ u8 wlan_idx_hi;
945+ u8 buf[40];
946+ } __packed req = {
947+ .category = MT_BF_IBF_PHASE_COMP,
948+ .bw = val[0],
949+ .jp_band = (val[2] == 1) ? 1 : 0,
950+ .dbdc_idx = phy->band_idx,
951+ .read_from_e2p = val[3],
952+ .disable = val[4],
953+ };
954+ struct mt7915_tm_txbf_phase *phase =
955+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
956+
957+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
958+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
959+
960+ pr_info("ibf cal process: phase comp info\n");
961+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
962+ &req, sizeof(req), 0);
963+
964+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
965+ sizeof(req), true);
966+}
967+
968+static int
969+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
970+{
971+ struct mt7915_dev *dev = phy->dev;
972+ struct {
973+ u8 format_id;
974+ u8 pfmu_idx;
975+ bool bfer;
976+ u8 dbdc_idx;
977+ } __packed req = {
978+ .format_id = MT_BF_PFMU_TAG_READ,
979+ .pfmu_idx = pfmu_idx,
980+ .bfer = 1,
981+ .dbdc_idx = phy != &dev->phy,
982+ };
983+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
984+
985+ tag->t1.pfmu_idx = 0;
986+
987+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
988+ sizeof(req), true);
989+}
990+
991+static int
992+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
993+ struct mt7915_tm_pfmu_tag *tag)
994+{
995+ struct mt7915_dev *dev = phy->dev;
996+ struct {
997+ u8 format_id;
998+ u8 pfmu_idx;
999+ bool bfer;
1000+ u8 dbdc_idx;
1001+ u8 buf[64];
1002+ } __packed req = {
1003+ .format_id = MT_BF_PFMU_TAG_WRITE,
1004+ .pfmu_idx = pfmu_idx,
1005+ .bfer = 1,
1006+ .dbdc_idx = phy != &dev->phy,
1007+ };
1008+
1009+ memcpy(req.buf, tag, sizeof(*tag));
1010+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1011+
1012+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1013+ sizeof(req), false);
1014+}
1015+
1016+static int
1017+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1018+ bool ibf, bool phase_cal)
1019+{
1020+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1021+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1022+ struct mt7915_dev *dev = phy->dev;
1023+ struct {
1024+ u8 category;
1025+ u8 wlan_idx_lo;
1026+ bool ebf;
1027+ bool ibf;
1028+ bool mu_txbf;
1029+ bool phase_cal;
1030+ u8 wlan_idx_hi;
1031+ u8 _rsv;
1032+ } __packed req = {
1033+ .category = MT_BF_DATA_PACKET_APPLY,
1034+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1035+ .ebf = ebf,
1036+ .ibf = ibf,
1037+ .phase_cal = phase_cal,
1038+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1039+ };
1040+
1041+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1042+ sizeof(req), false);
1043+}
1044+
1045+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1046+ struct mt76_wcid *wcid)
1047+{
1048+ struct mt7915_dev *dev = phy->dev;
1049+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1050+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1051+ struct sta_phy rate = {};
1052+
1053+ if (!sta)
1054+ return 0;
1055+
1056+ rate.type = MT_PHY_TYPE_HT;
1057+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1058+ rate.nss = ed->tx_rate_nss;
1059+ rate.mcs = ed->tx_rate_idx;
1060+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1061+
1062+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1063+ &rate, RATE_PARAM_FIXED);
1064+}
1065+
1066+static int
1067+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1068+{
1069+ bool bf_on = val[0], update = val[3];
1070+ /* u16 wlan_idx = val[2]; */
1071+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1072+ struct mt76_testmode_data *td = &phy->mt76->test;
1073+ struct mt76_wcid *wcid;
1074+
1075+ if (bf_on) {
1076+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1077+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1078+ tag->t1.invalid_prof = false;
1079+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1080+
1081+ phy->test.bf_ever_en = true;
1082+
1083+ if (update)
1084+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1085+ } else {
1086+ if (!phy->test.bf_ever_en) {
1087+ if (update)
1088+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1089+ } else {
1090+ phy->test.bf_ever_en = false;
1091+
1092+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1093+ tag->t1.invalid_prof = true;
1094+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1095+ }
1096+ }
1097+
1098+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1099+ mt7915_tm_txbf_set_rate(phy, wcid);
1100+
1101+ return 0;
1102+}
1103+
1104+static int
1105+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1106+{
1107+ static const u8 mode_to_lm[] = {
1108+ [MT76_TM_TX_MODE_CCK] = 0,
1109+ [MT76_TM_TX_MODE_OFDM] = 0,
1110+ [MT76_TM_TX_MODE_HT] = 1,
1111+ [MT76_TM_TX_MODE_VHT] = 2,
1112+ [MT76_TM_TX_MODE_HE_SU] = 3,
1113+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1114+ [MT76_TM_TX_MODE_HE_TB] = 3,
1115+ [MT76_TM_TX_MODE_HE_MU] = 3,
1116+ };
1117+ struct mt76_testmode_data *td = &phy->mt76->test;
1118+ struct mt76_wcid *wcid;
1119+ struct ieee80211_vif *vif = phy->monitor_vif;
1120+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1121+ u8 pfmu_idx = val[0], nc = val[2], nr;
1122+ int ret;
1123+
1124+ if (td->tx_antenna_mask == 3)
1125+ nr = 1;
1126+ else if (td->tx_antenna_mask == 7)
1127+ nr = 2;
1128+ else
1129+ nr = 3;
1130+
1131+ memset(tag, 0, sizeof(*tag));
1132+ tag->t1.pfmu_idx = pfmu_idx;
1133+ tag->t1.ebf = ebf;
1134+ tag->t1.nr = nr;
1135+ tag->t1.nc = nc;
1136+ tag->t1.invalid_prof = true;
1137+
1138+ tag->t1.snr_sts4 = 0xc0;
1139+ tag->t1.snr_sts5 = 0xff;
1140+ tag->t1.snr_sts6 = 0xff;
1141+ tag->t1.snr_sts7 = 0xff;
1142+
1143+ if (ebf) {
1144+ tag->t1.row_id1 = 0;
1145+ tag->t1.row_id2 = 1;
1146+ tag->t1.row_id3 = 2;
1147+ tag->t1.row_id4 = 3;
1148+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1149+ } else {
1150+ tag->t1.row_id1 = 4;
1151+ tag->t1.row_id2 = 5;
1152+ tag->t1.row_id3 = 6;
1153+ tag->t1.row_id4 = 7;
1154+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1155+
1156+ tag->t2.ibf_timeout = 0xff;
1157+ tag->t2.ibf_nr = nr;
1158+ }
1159+
1160+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1161+ if (ret)
1162+ return ret;
1163+
1164+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1165+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1166+ if (ret)
1167+ return ret;
1168+
1169+ if (!ebf)
1170+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1171+
1172+ return 0;
1173+}
1174+
1175+static int
1176+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1177+{
1178+#define GROUP_L 0
1179+#define GROUP_M 1
1180+#define GROUP_H 2
1181+ struct mt7915_dev *dev = phy->dev;
1182+ struct {
1183+ u8 category;
1184+ u8 group_l_m_n;
1185+ u8 group;
1186+ bool sx2;
1187+ u8 cal_type;
1188+ u8 lna_gain_level;
1189+ u8 _rsv[2];
1190+ } __packed req = {
1191+ .category = MT_BF_PHASE_CAL,
1192+ .group = val[0],
1193+ .group_l_m_n = val[1],
1194+ .sx2 = val[2],
1195+ .cal_type = val[3],
1196+ .lna_gain_level = 0, /* for test purposes */
1197+ };
1198+ struct mt7915_tm_txbf_phase *phase =
1199+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1200+
1201+ phase[req.group].status = 0;
1202+
1203+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1204+ sizeof(req), true);
1205+}
1206+
1207+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1208+{
1209+#define BF_PFMU_TAG 16
1210+#define BF_CAL_PHASE 21
1211+ u8 format_id;
1212+
developerf64861f2022-06-22 11:44:53 +08001213+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
developer4c6b6002022-05-30 16:36:44 +08001214+ format_id = *(u8 *)skb->data;
1215+
1216+ if (format_id == BF_PFMU_TAG) {
1217+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1218+
1219+ skb_pull(skb, 8);
1220+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1221+ } else if (format_id == BF_CAL_PHASE) {
1222+ struct mt7915_tm_ibf_cal_info *cal;
1223+ struct mt7915_tm_txbf_phase *phase =
1224+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1225+
1226+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1227+ switch (cal->cal_type) {
1228+ case IBF_PHASE_CAL_NORMAL:
1229+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1230+ if (cal->group_l_m_n != GROUP_M)
1231+ break;
1232+ phase = &phase[cal->group];
1233+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1234+ phase->status = cal->status;
1235+ break;
1236+ case IBF_PHASE_CAL_VERIFY:
1237+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1238+ break;
1239+ default:
1240+ break;
1241+ }
1242+ }
1243+
1244+ wake_up(&dev->mt76.tx_wait);
1245+
1246+ return 0;
1247+}
1248+
1249+static int
1250+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1251+{
1252+ struct mt76_testmode_data *td = &phy->mt76->test;
1253+ u16 pfmu_idx = val[0];
1254+ u16 subc_id = val[1];
1255+ u16 angle11 = val[2];
1256+ u16 angle21 = val[3];
1257+ u16 angle31 = val[4];
1258+ u16 angle41 = val[5];
1259+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1260+ struct mt7915_tm_pfmu_data *pfmu_data;
1261+
1262+ if (subc_id > 63)
1263+ return -EINVAL;
1264+
1265+ if (td->tx_antenna_mask == 2) {
1266+ phi11 = (s16)(angle21 - angle11);
1267+ } else if (td->tx_antenna_mask == 3) {
1268+ phi11 = (s16)(angle31 - angle11);
1269+ phi21 = (s16)(angle31 - angle21);
1270+ } else {
1271+ phi11 = (s16)(angle41 - angle11);
1272+ phi21 = (s16)(angle41 - angle21);
1273+ phi31 = (s16)(angle41 - angle31);
1274+ }
1275+
1276+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1277+ pfmu_data = &pfmu_data[subc_id];
1278+
1279+ if (subc_id < 32)
1280+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1281+ else
1282+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1283+ pfmu_data->phi11 = cpu_to_le16(phi11);
1284+ pfmu_data->phi21 = cpu_to_le16(phi21);
1285+ pfmu_data->phi31 = cpu_to_le16(phi31);
1286+
1287+ if (subc_id == 63) {
1288+ struct mt7915_dev *dev = phy->dev;
1289+ struct {
1290+ u8 format_id;
1291+ u8 pfmu_idx;
1292+ u8 dbdc_idx;
1293+ u8 _rsv;
1294+ u8 buf[512];
1295+ } __packed req = {
1296+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1297+ .pfmu_idx = pfmu_idx,
1298+ .dbdc_idx = phy != &dev->phy,
1299+ };
1300+
1301+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1302+
1303+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1304+ &req, sizeof(req), true);
1305+ }
1306+
1307+ return 0;
1308+}
1309+
1310+static int
1311+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1312+{
1313+ struct mt7915_tm_txbf_phase *phase, *p;
1314+ struct mt7915_dev *dev = phy->dev;
1315+ u8 *eeprom = dev->mt76.eeprom.data;
1316+ u16 offset;
1317+ bool is_7976;
1318+ int i;
1319+
1320+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1321+ offset = is_7976 ? 0x60a : 0x651;
1322+
1323+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1324+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1325+ p = &phase[i];
1326+
1327+ if (!p->status)
1328+ continue;
1329+
1330+ /* copy phase cal data to eeprom */
1331+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1332+ sizeof(p->phase));
1333+ }
1334+
1335+ return 0;
1336+}
1337+
1338+static int
1339+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1340+{
1341+ struct mt76_testmode_data *td = &phy->mt76->test;
1342+ u16 *val = td->txbf_param;
1343+
1344+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1345+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1346+
1347+ switch (td->txbf_act) {
1348+ case MT76_TM_TXBF_ACT_INIT:
1349+ return mt7915_tm_txbf_init(phy, val);
1350+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1351+ mt7915_tm_update_channel(phy);
1352+ break;
1353+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1354+ return mt7915_tm_txbf_phase_comp(phy, val);
1355+ case MT76_TM_TXBF_ACT_TX_PREP:
1356+ return mt7915_tm_txbf_set_tx(phy, val);
1357+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1358+ return mt7915_tm_txbf_profile_update(phy, val, false);
1359+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1360+ return mt7915_tm_txbf_profile_update(phy, val, true);
1361+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1362+ return mt7915_tm_txbf_phase_cal(phy, val);
1363+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1364+ return mt7915_tm_txbf_profile_update_all(phy, val);
1365+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1366+ return mt7915_tm_txbf_e2p_update(phy);
1367+ default:
1368+ break;
1369+ };
1370+
1371+ return 0;
1372+}
1373+
1374 static int
developerf64861f2022-06-22 11:44:53 +08001375 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer4c6b6002022-05-30 16:36:44 +08001376- u16 cw_max, u16 txop)
1377+ u16 cw_max, u16 txop, u8 tx_cmd)
1378 {
developerf64861f2022-06-22 11:44:53 +08001379 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer4c6b6002022-05-30 16:36:44 +08001380- struct mt7915_mcu_tx req = { .total = 1 };
1381+ struct mt7915_mcu_tx req = {
1382+ .valid = true,
1383+ .mode = tx_cmd,
1384+ .total = 1,
1385+ };
1386 struct edca *e = &req.edca[0];
1387
developerf64861f2022-06-22 11:44:53 +08001388 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
1389@@ -262,7 +1036,8 @@ done:
developer4c6b6002022-05-30 16:36:44 +08001390
developerf64861f2022-06-22 11:44:53 +08001391 return mt7915_tm_set_wmm_qid(phy,
developer4c6b6002022-05-30 16:36:44 +08001392 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1393- aifsn, cw, cw, 0);
1394+ aifsn, cw, cw, 0,
1395+ mode == MT76_TM_TX_MODE_HE_MU);
1396 }
1397
1398 static int
developerf64861f2022-06-22 11:44:53 +08001399@@ -338,7 +1113,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
developer4c6b6002022-05-30 16:36:44 +08001400 bitrate = cfg80211_calculate_bitrate(&rate);
1401 tx_len = bitrate * tx_time / 10 / 8;
1402
1403- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1404+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1405 if (ret)
1406 return ret;
1407
developerd59e4772022-07-14 13:48:49 +08001408@@ -456,64 +1231,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001409
1410 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1411
1412- if (!en)
1413+ if (!en) {
1414 mt7915_tm_set_tam_arb(phy, en, 0);
1415+
1416+ phy->mt76->test.aid = 0;
1417+ phy->mt76->test.tx_mpdu_len = 0;
1418+ phy->test.bf_en = 0;
1419+ mt7915_tm_set_entry(phy);
1420+ }
1421+}
1422+
1423+static bool
1424+mt7915_tm_check_skb(struct mt7915_phy *phy)
1425+{
1426+ struct mt76_testmode_entry_data *ed;
1427+ struct mt76_wcid *wcid;
1428+
1429+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1430+ struct ieee80211_tx_info *info;
1431+
1432+ if (!ed->tx_skb)
1433+ return false;
1434+
1435+ info = IEEE80211_SKB_CB(ed->tx_skb);
1436+ info->control.vif = phy->monitor_vif;
1437+ }
1438+
1439+ return true;
1440+}
1441+
1442+static int
1443+mt7915_tm_set_ba(struct mt7915_phy *phy)
1444+{
1445+ struct mt7915_dev *dev = phy->dev;
1446+ struct mt76_testmode_data *td = &phy->mt76->test;
1447+ struct mt76_wcid *wcid;
1448+ struct ieee80211_vif *vif = phy->monitor_vif;
1449+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1450+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1451+
1452+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1453+ int tid, ret;
1454+
1455+ params.sta = wcid_to_sta(wcid);
1456+ for (tid = 0; tid < 8; tid++) {
1457+ params.tid = tid;
1458+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1459+ if (ret)
1460+ return ret;
1461+ }
1462+ }
1463+
1464+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1465+ 0x01010101);
1466+
1467+ return 0;
1468+}
1469+
1470+static int
1471+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1472+{
1473+/* #define MURU_SET_MANUAL_CFG 100 */
1474+ struct mt7915_dev *dev = phy->dev;
1475+ struct {
1476+ __le32 cmd;
1477+ struct mt7915_tm_muru muru;
1478+ } __packed req = {
1479+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1480+ };
1481+
1482+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1483+
1484+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1485+ sizeof(req), false);
1486+}
1487+
1488+static int
1489+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1490+{
1491+ struct mt76_testmode_data *td = &phy->mt76->test;
1492+ struct mt76_testmode_entry_data *ed;
1493+ struct mt76_wcid *wcid;
1494+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1495+ struct ieee80211_vif *vif = phy->monitor_vif;
1496+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1497+ struct mt7915_tm_muru muru = {};
1498+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1499+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1500+ int i;
1501+
1502+ comm->ppdu_format = MURU_PPDU_HE_MU;
1503+ comm->band = mvif->mt76.band_idx;
1504+ comm->wmm_idx = mvif->mt76.wmm_idx;
1505+ comm->spe_idx = phy->test.spe_idx;
1506+
1507+ dl->bw = mt7915_tm_chan_bw(chandef->width);
1508+ dl->gi = td->tx_rate_sgi;
1509+ dl->ltf = td->tx_ltf;
1510+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1511+
1512+ for (i = 0; i < sizeof(dl->ru); i++)
1513+ dl->ru[i] = 0x71;
1514+
1515+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1516+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1517+
1518+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1519+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1520+ dl_usr->ru_idx = ed->ru_idx;
1521+ dl_usr->mcs = ed->tx_rate_idx;
1522+ dl_usr->nss = ed->tx_rate_nss - 1;
1523+ dl_usr->ldpc = ed->tx_rate_ldpc;
1524+ dl->ru[dl->user_num] = ed->ru_alloc;
1525+
1526+ dl->user_num++;
1527+ }
1528+
1529+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1530+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1531+
1532+ return mt7915_tm_set_muru_cfg(phy, &muru);
1533+}
1534+
1535+static int
1536+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1537+{
1538+#define MURU_SET_TX_PKT_CNT 105
1539+#define MURU_SET_TX_EN 106
1540+ struct mt7915_dev *dev = phy->dev;
1541+ struct {
1542+ __le32 cmd;
1543+ u8 band;
1544+ u8 enable;
1545+ u8 _rsv[2];
1546+ __le32 tx_count;
1547+ } __packed req = {
1548+ .band = phy != &dev->phy,
1549+ .enable = enable,
1550+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1551+ };
1552+ int ret;
1553+
1554+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1555+ cpu_to_le32(MURU_SET_TX_EN);
1556+
1557+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1558+ sizeof(req), false);
1559+ if (ret)
1560+ return ret;
1561+
1562+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1563+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1564+
1565+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1566+ sizeof(req), false);
1567 }
1568
1569 static void
1570-mt7915_tm_update_channel(struct mt7915_phy *phy)
1571+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1572 {
1573- mutex_unlock(&phy->dev->mt76.mutex);
1574- mt7915_set_channel(phy);
1575- mutex_lock(&phy->dev->mt76.mutex);
1576+ struct mt76_testmode_data *td = &phy->mt76->test;
1577
1578- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1579+ if (enable) {
1580+ struct mt7915_dev *dev = phy->dev;
1581+
1582+ mt7915_tm_set_ba(phy);
1583+ mt7915_tm_set_muru_dl(phy);
1584+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1585+ } else {
1586+ /* reset to zero so the real tx free count is reported */
1587+ td->tx_done = 0;
1588+ }
1589+
1590+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1591+ usleep_range(100000, 200000);
1592 }
1593
1594 static void
developerd59e4772022-07-14 13:48:49 +08001595 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1596 {
developer4c6b6002022-05-30 16:36:44 +08001597 struct mt76_testmode_data *td = &phy->mt76->test;
1598- struct mt7915_dev *dev = phy->dev;
1599- struct ieee80211_tx_info *info;
1600- u8 duty_cycle = td->tx_duty_cycle;
1601- u32 tx_time = td->tx_time;
1602- u32 ipg = td->tx_ipg;
1603
1604 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1605- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1606+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1607
1608 if (en) {
1609- mt7915_tm_update_channel(phy);
1610+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1611+ u8 duty_cycle = td->tx_duty_cycle;
1612+
1613+ if (!phy->test.bf_en)
1614+ mt7915_tm_update_channel(phy);
1615
developerd59e4772022-07-14 13:48:49 +08001616 if (td->tx_spe_idx)
developer4c6b6002022-05-30 16:36:44 +08001617 phy->test.spe_idx = td->tx_spe_idx;
developerd59e4772022-07-14 13:48:49 +08001618 else
1619 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
developer4c6b6002022-05-30 16:36:44 +08001620- }
1621
1622- mt7915_tm_set_tam_arb(phy, en,
1623- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1624+ /* if all three params are set, duty_cycle will be ignored */
1625+ if (duty_cycle && tx_time && !ipg) {
1626+ ipg = tx_time * 100 / duty_cycle - tx_time;
1627+ } else if (duty_cycle && !tx_time && ipg) {
1628+ if (duty_cycle < 100)
1629+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1630+ }
1631
1632- /* if all three params are set, duty_cycle will be ignored */
1633- if (duty_cycle && tx_time && !ipg) {
1634- ipg = tx_time * 100 / duty_cycle - tx_time;
1635- } else if (duty_cycle && !tx_time && ipg) {
1636- if (duty_cycle < 100)
1637- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1638- }
1639+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1640+ mt7915_tm_set_tx_len(phy, tx_time);
1641
1642- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1643- mt7915_tm_set_tx_len(phy, tx_time);
1644+ if (ipg)
1645+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1646
1647- if (ipg)
1648- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1649+ if (!mt7915_tm_check_skb(phy))
1650+ return;
1651+ } else {
1652+ mt7915_tm_clean_hwq(phy);
1653+ }
1654
1655- if (!en || !td->tx_skb)
1656- return;
1657+ mt7915_tm_set_tam_arb(phy, en,
1658+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1659
1660- info = IEEE80211_SKB_CB(td->tx_skb);
1661- info->control.vif = phy->monitor_vif;
1662+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1663+ mt7915_tm_tx_frames_mu(phy, en);
1664
1665 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1666 }
developerd59e4772022-07-14 13:48:49 +08001667@@ -542,10 +1480,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001668 return ret;
1669
1670 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1671- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1672- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1673- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1674- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1675
1676 if (!clear) {
developer1d9fede2022-08-29 15:24:07 +08001677 enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
developerd59e4772022-07-14 13:48:49 +08001678@@ -560,13 +1494,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001679 return 0;
1680 }
1681
1682+static int
1683+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1684+{
1685+ struct mt7915_dev *dev = phy->dev;
1686+ struct mt76_wcid *wcid = NULL;
1687+ struct mt76_testmode_entry_data *ed;
1688+ struct {
1689+ u8 band;
1690+ u8 _rsv;
1691+ __le16 wlan_idx;
1692+ } __packed req = {
1693+ .band = phy->band_idx,
1694+ };
1695+
1696+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1697+ if (ed->aid == aid)
1698+ break;
1699+
1700+ if (!wcid)
1701+ return -EINVAL;
1702+
1703+ req.wlan_idx = cpu_to_le16(wcid->idx);
1704+
1705+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1706+ &req, sizeof(req), false);
1707+}
1708+
1709+static int
1710+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1711+{
1712+ struct mt7915_dev *dev = phy->dev;
1713+ struct mt7915_tm_cmd req = {
1714+ .testmode_en = 1,
1715+ .param_idx = MCU_ATE_SET_MU_RX_AID,
1716+ .param.rx_aid.band = cpu_to_le32(phy->band_idx),
1717+ .param.rx_aid.aid = cpu_to_le16(aid),
1718+ };
1719+
1720+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1721+ sizeof(req), false);
1722+}
1723+
1724 static void
1725 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1726 {
1727+ struct mt76_testmode_data *td = &phy->mt76->test;
1728+
1729+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1730 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1731
1732 if (en) {
1733- mt7915_tm_update_channel(phy);
1734+ if (!phy->test.bf_en)
1735+ mt7915_tm_update_channel(phy);
1736+ if (td->aid)
1737+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1738
1739 /* read-clear */
1740 mt7915_tm_get_rx_stats(phy, true);
developerd59e4772022-07-14 13:48:49 +08001741@@ -574,9 +1556,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001742 /* clear fw count */
1743 mt7915_tm_set_phy_count(phy, 0);
1744 mt7915_tm_set_phy_count(phy, 1);
1745-
1746- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1747 }
1748+
1749+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1750+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1751+
1752+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1753 }
1754
1755 static int
developerd59e4772022-07-14 13:48:49 +08001756@@ -613,35 +1598,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001757 tx_cont->center_ch = freq1;
1758 tx_cont->tx_ant = td->tx_antenna_mask;
1759 tx_cont->band = phy != &dev->phy;
1760-
1761- switch (chandef->width) {
1762- case NL80211_CHAN_WIDTH_40:
1763- tx_cont->bw = CMD_CBW_40MHZ;
1764- break;
1765- case NL80211_CHAN_WIDTH_80:
1766- tx_cont->bw = CMD_CBW_80MHZ;
1767- break;
1768- case NL80211_CHAN_WIDTH_80P80:
1769- tx_cont->bw = CMD_CBW_8080MHZ;
1770- break;
1771- case NL80211_CHAN_WIDTH_160:
1772- tx_cont->bw = CMD_CBW_160MHZ;
1773- break;
1774- case NL80211_CHAN_WIDTH_5:
1775- tx_cont->bw = CMD_CBW_5MHZ;
1776- break;
1777- case NL80211_CHAN_WIDTH_10:
1778- tx_cont->bw = CMD_CBW_10MHZ;
1779- break;
1780- case NL80211_CHAN_WIDTH_20:
1781- tx_cont->bw = CMD_CBW_20MHZ;
1782- break;
1783- case NL80211_CHAN_WIDTH_20_NOHT:
1784- tx_cont->bw = CMD_CBW_20MHZ;
1785- break;
1786- default:
1787- return -EINVAL;
1788- }
1789+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1790
1791 if (!en) {
1792 req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
developerd59e4772022-07-14 13:48:49 +08001793@@ -725,6 +1682,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer4c6b6002022-05-30 16:36:44 +08001794 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1795 if (changed & BIT(TM_CHANGED_TXPOWER))
1796 mt7915_tm_set_tx_power(phy);
1797+ if (changed & BIT(TM_CHANGED_AID))
1798+ mt7915_tm_set_entry(phy);
1799+ if (changed & BIT(TM_CHANGED_CFG))
1800+ mt7915_tm_set_cfg(phy);
1801+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1802+ mt7915_tm_set_txbf(phy);
1803 }
1804
1805 static int
developer5e5d6802022-09-01 10:53:06 +08001806@@ -804,6 +1767,7 @@ static int
developer4c6b6002022-05-30 16:36:44 +08001807 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1808 {
1809 struct mt7915_phy *phy = mphy->priv;
1810+ struct mt7915_dev *dev = phy->dev;
1811 void *rx, *rssi;
1812 int i;
1813
developer5e5d6802022-09-01 10:53:06 +08001814@@ -849,11 +1813,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer4c6b6002022-05-30 16:36:44 +08001815
1816 nla_nest_end(msg, rx);
1817
1818+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1819+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1820+
1821 return mt7915_tm_get_rx_stats(phy, false);
1822 }
1823
1824+static int
1825+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1826+{
1827+ struct mt7915_mcu_eeprom_info req = {};
1828+ u8 *eeprom = dev->mt76.eeprom.data;
1829+ int i, ret = -EINVAL;
1830+
1831+ /* avoid damaging the chip id stored in efuse */
1832+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1833+ goto out;
1834+
1835+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1836+ req.addr = cpu_to_le32(i);
1837+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1838+
1839+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1840+ &req, sizeof(req), true);
1841+ if (ret)
1842+ return ret;
1843+ }
1844+
1845+out:
1846+ return ret;
1847+}
1848+
1849+static int
1850+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1851+{
1852+ struct mt7915_phy *phy = mphy->priv;
1853+ struct mt7915_dev *dev = phy->dev;
1854+ u8 *eeprom = dev->mt76.eeprom.data;
1855+ int ret = 0;
1856+
1857+ if (offset >= mt7915_eeprom_size(dev))
1858+ return -EINVAL;
1859+
1860+ switch (action) {
1861+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1862+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1863+ break;
1864+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1865+ ret = mt7915_mcu_set_eeprom(dev, true);
1866+ break;
1867+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1868+ ret = mt7915_tm_write_back_to_efuse(dev);
1869+ break;
1870+ default:
1871+ break;
1872+ }
1873+
1874+ return ret;
1875+}
1876+
1877 const struct mt76_testmode_ops mt7915_testmode_ops = {
1878 .set_state = mt7915_tm_set_state,
1879 .set_params = mt7915_tm_set_params,
1880 .dump_stats = mt7915_tm_dump_stats,
1881+ .set_eeprom = mt7915_tm_set_eeprom,
1882 };
1883diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developer20747c12022-09-16 14:09:40 +08001884index a1c54c89..01b08e9e 100644
developer4c6b6002022-05-30 16:36:44 +08001885--- a/mt7915/testmode.h
1886+++ b/mt7915/testmode.h
1887@@ -4,6 +4,8 @@
1888 #ifndef __MT7915_TESTMODE_H
1889 #define __MT7915_TESTMODE_H
1890
1891+#include "mcu.h"
1892+
1893 struct mt7915_tm_trx {
1894 u8 type;
1895 u8 enable;
1896@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1897 u8 _rsv[2];
1898 };
1899
1900+struct mt7915_tm_mu_rx_aid {
1901+ __le32 band;
1902+ __le16 aid;
1903+};
1904+
1905 struct mt7915_tm_cmd {
1906 u8 testmode_en;
1907 u8 param_idx;
1908@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1909 struct mt7915_tm_slot_time slot;
1910 struct mt7915_tm_clean_txq clean;
1911 struct mt7915_tm_cfg cfg;
1912+ struct mt7915_tm_mu_rx_aid rx_aid;
1913 u8 test[72];
1914 } param;
1915 } __packed;
1916@@ -109,6 +117,16 @@ enum {
1917 TAM_ARB_OP_MODE_FORCE_SU = 5,
1918 };
1919
1920+enum {
1921+ TM_CBW_20MHZ,
1922+ TM_CBW_40MHZ,
1923+ TM_CBW_80MHZ,
1924+ TM_CBW_10MHZ,
1925+ TM_CBW_5MHZ,
1926+ TM_CBW_160MHZ,
1927+ TM_CBW_8080MHZ,
1928+};
1929+
1930 struct mt7915_tm_rx_stat_band {
1931 u8 category;
1932
1933@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1934 __le16 mdrdy_cnt_ofdm;
1935 };
1936
1937+struct mt7915_tm_muru_comm {
1938+ u8 ppdu_format;
1939+ u8 sch_type;
1940+ u8 band;
1941+ u8 wmm_idx;
1942+ u8 spe_idx;
1943+ u8 proc_type;
1944+};
1945+
1946+struct mt7915_tm_muru_dl_usr {
1947+ __le16 wlan_idx;
1948+ u8 ru_alloc_seg;
1949+ u8 ru_idx;
1950+ u8 ldpc;
1951+ u8 nss;
1952+ u8 mcs;
1953+ u8 mu_group_idx;
1954+ u8 vht_groud_id;
1955+ u8 vht_up;
1956+ u8 he_start_stream;
1957+ u8 he_mu_spatial;
1958+ u8 ack_policy;
1959+ __le16 tx_power_alpha;
1960+};
1961+
1962+struct mt7915_tm_muru_dl {
1963+ u8 user_num;
1964+ u8 tx_mode;
1965+ u8 bw;
1966+ u8 gi;
1967+ u8 ltf;
1968+ /* sigB */
1969+ u8 mcs;
1970+ u8 dcm;
1971+ u8 cmprs;
1972+
1973+ u8 tx_power;
1974+ u8 ru[8];
1975+ u8 c26[2];
1976+ u8 ack_policy;
1977+
1978+ struct mt7915_tm_muru_dl_usr usr[16];
1979+};
1980+
1981+struct mt7915_tm_muru_ul_usr {
1982+ __le16 wlan_idx;
1983+ u8 ru_alloc;
1984+ u8 ru_idx;
1985+ u8 ldpc;
1986+ u8 nss;
1987+ u8 mcs;
1988+ u8 target_rssi;
1989+ __le32 trig_pkt_size;
1990+};
1991+
1992+struct mt7915_tm_muru_ul {
1993+ u8 user_num;
1994+
1995+ /* UL TX */
1996+ u8 trig_type;
1997+ __le16 trig_cnt;
1998+ __le16 trig_intv;
1999+ u8 bw;
2000+ u8 gi_ltf;
2001+ __le16 ul_len;
2002+ u8 pad;
2003+ u8 trig_ta[ETH_ALEN];
2004+ u8 ru[8];
2005+ u8 c26[2];
2006+
2007+ struct mt7915_tm_muru_ul_usr usr[16];
2008+ /* HE TB RX Debug */
2009+ __le32 rx_hetb_nonsf_en_bitmap;
2010+ __le32 rx_hetb_cfg[2];
2011+
2012+ /* DL TX */
2013+ u8 ba_type;
2014+};
2015+
2016+struct mt7915_tm_muru {
2017+ __le32 cfg_comm;
2018+ __le32 cfg_dl;
2019+ __le32 cfg_ul;
2020+
2021+ struct mt7915_tm_muru_comm comm;
2022+ struct mt7915_tm_muru_dl dl;
2023+ struct mt7915_tm_muru_ul ul;
2024+};
2025+
2026+#define MURU_PPDU_HE_MU BIT(3)
2027+
2028+/* Common Config */
2029+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2030+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2031+/* #define MURU_COMM_BAND BIT(2) */
2032+/* #define MURU_COMM_WMM BIT(3) */
2033+/* #define MURU_COMM_SPE_IDX BIT(4) */
2034+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2035+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2036+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2037+/* DL Config */
2038+#define MURU_DL_BW BIT(0)
2039+#define MURU_DL_GI BIT(1)
2040+#define MURU_DL_TX_MODE BIT(2)
2041+#define MURU_DL_TONE_PLAN BIT(3)
2042+#define MURU_DL_USER_CNT BIT(4)
2043+#define MURU_DL_LTF BIT(5)
2044+#define MURU_DL_SIGB_MCS BIT(6)
2045+#define MURU_DL_SIGB_DCM BIT(7)
2046+#define MURU_DL_SIGB_CMPRS BIT(8)
2047+#define MURU_DL_ACK_POLICY BIT(9)
2048+#define MURU_DL_TXPOWER BIT(10)
2049+/* DL Per User Config */
2050+#define MURU_DL_USER_WLAN_ID BIT(16)
2051+#define MURU_DL_USER_COD BIT(17)
2052+#define MURU_DL_USER_MCS BIT(18)
2053+#define MURU_DL_USER_NSS BIT(19)
2054+#define MURU_DL_USER_RU_ALLOC BIT(20)
2055+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2056+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2057+#define MURU_DL_USER_ACK_POLICY BIT(23)
2058+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2059+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2060+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2061+
2062+#define MAX_PHASE_GROUP_NUM 9
2063+
2064+struct mt7915_tm_txbf_phase {
2065+ u8 status;
2066+ struct {
2067+ u8 r0_uh;
2068+ u8 r0_h;
2069+ u8 r0_m;
2070+ u8 r0_l;
2071+ u8 r0_ul;
2072+ u8 r1_uh;
2073+ u8 r1_h;
2074+ u8 r1_m;
2075+ u8 r1_l;
2076+ u8 r1_ul;
2077+ u8 r2_uh;
2078+ u8 r2_h;
2079+ u8 r2_m;
2080+ u8 r2_l;
2081+ u8 r2_ul;
2082+ u8 r3_uh;
2083+ u8 r3_h;
2084+ u8 r3_m;
2085+ u8 r3_l;
2086+ u8 r3_ul;
2087+ u8 r2_uh_sx2;
2088+ u8 r2_h_sx2;
2089+ u8 r2_m_sx2;
2090+ u8 r2_l_sx2;
2091+ u8 r2_ul_sx2;
2092+ u8 r3_uh_sx2;
2093+ u8 r3_h_sx2;
2094+ u8 r3_m_sx2;
2095+ u8 r3_l_sx2;
2096+ u8 r3_ul_sx2;
2097+ u8 m_t0_h;
2098+ u8 m_t1_h;
2099+ u8 m_t2_h;
2100+ u8 m_t2_h_sx2;
2101+ u8 r0_reserved;
2102+ u8 r1_reserved;
2103+ u8 r2_reserved;
2104+ u8 r3_reserved;
2105+ u8 r2_sx2_reserved;
2106+ u8 r3_sx2_reserved;
2107+ } phase;
2108+};
2109+
2110+struct mt7915_tm_pfmu_tag1 {
2111+ __le32 pfmu_idx:10;
2112+ __le32 ebf:1;
2113+ __le32 data_bw:2;
2114+ __le32 lm:2;
2115+ __le32 is_mu:1;
2116+ __le32 nr:3, nc:3;
2117+ __le32 codebook:2;
2118+ __le32 ngroup:2;
2119+ __le32 _rsv:2;
2120+ __le32 invalid_prof:1;
2121+ __le32 rmsd:3;
2122+
2123+ __le32 col_id1:6, row_id1:10;
2124+ __le32 col_id2:6, row_id2:10;
2125+ __le32 col_id3:6, row_id3:10;
2126+ __le32 col_id4:6, row_id4:10;
2127+
2128+ __le32 ru_start_id:7;
2129+ __le32 _rsv1:1;
2130+ __le32 ru_end_id:7;
2131+ __le32 _rsv2:1;
2132+ __le32 mob_cal_en:1;
2133+ __le32 _rsv3:15;
2134+
2135+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2136+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2137+
2138+ __le32 _rsv4;
2139+} __packed;
2140+
2141+struct mt7915_tm_pfmu_tag2 {
2142+ __le32 smart_ant:24;
2143+ __le32 se_idx:5;
2144+ __le32 _rsv:3;
2145+
2146+ __le32 _rsv1:8;
2147+ __le32 rmsd_thres:3;
2148+ __le32 _rsv2:5;
2149+ __le32 ibf_timeout:8;
2150+ __le32 _rsv3:8;
2151+
2152+ __le32 _rsv4:16;
2153+ __le32 ibf_data_bw:2;
2154+ __le32 ibf_nc:3;
2155+ __le32 ibf_nr:3;
2156+ __le32 ibf_ru:8;
2157+
2158+ __le32 mob_delta_t:8;
2159+ __le32 mob_lq_result:7;
2160+ __le32 _rsv5:1;
2161+ __le32 _rsv6:16;
2162+
2163+ __le32 _rsv7;
2164+} __packed;
2165+
2166+struct mt7915_tm_pfmu_tag {
2167+ struct mt7915_tm_pfmu_tag1 t1;
2168+ struct mt7915_tm_pfmu_tag2 t2;
2169+};
2170+
2171+struct mt7915_tm_pfmu_data {
2172+ __le16 subc_idx;
2173+ __le16 phi11;
2174+ __le16 phi21;
2175+ __le16 phi31;
2176+};
2177+
2178+struct mt7915_tm_ibf_cal_info {
2179+ u8 format_id;
2180+ u8 group_l_m_n;
2181+ u8 group;
2182+ bool sx2;
2183+ u8 status;
2184+ u8 cal_type;
2185+ u8 _rsv[2];
2186+ u8 buf[1000];
2187+} __packed;
2188+
2189+enum {
2190+ IBF_PHASE_CAL_UNSPEC,
2191+ IBF_PHASE_CAL_NORMAL,
2192+ IBF_PHASE_CAL_VERIFY,
2193+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2194+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2195+};
2196+
2197 #endif
2198diff --git a/testmode.c b/testmode.c
developer20747c12022-09-16 14:09:40 +08002199index 1d0d5d30..7a9ed543 100644
developer4c6b6002022-05-30 16:36:44 +08002200--- a/testmode.c
2201+++ b/testmode.c
developere9954402022-07-12 10:15:11 -07002202@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer4c6b6002022-05-30 16:36:44 +08002203 };
2204 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2205
2206-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2207+static void
2208+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf1b69ea2022-07-04 10:54:39 +08002209+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2210+ u16 limit)
developer4c6b6002022-05-30 16:36:44 +08002211 {
2212 struct mt76_testmode_data *td = &phy->test;
2213 struct mt76_dev *dev = phy->dev;
2214- struct mt76_wcid *wcid = &dev->global_wcid;
2215- struct sk_buff *skb = td->tx_skb;
2216- struct mt76_queue *q;
2217- u16 tx_queued_limit;
2218- int qid;
2219-
2220- if (!skb || !td->tx_pending)
2221- return;
2222+ u16 count = limit;
2223
2224- qid = skb_get_queue_mapping(skb);
2225- q = phy->q_tx[qid];
2226-
2227- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2228-
2229- spin_lock_bh(&q->lock);
2230-
2231- while (td->tx_pending > 0 &&
2232- td->tx_queued - td->tx_done < tx_queued_limit &&
2233+ while (td->tx_pending > 0 && count &&
2234 q->queued < q->ndesc / 2) {
2235 int ret;
2236
developere9954402022-07-12 10:15:11 -07002237@@ -57,13 +45,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002238 if (ret < 0)
2239 break;
2240
2241+ count--;
2242 td->tx_pending--;
2243 td->tx_queued++;
2244+
2245+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2246+ if (td->tx_queued - td->tx_done >= limit)
2247+ break;
2248 }
2249
2250 dev->queue_ops->kick(dev, q);
2251+}
2252+
2253+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2254+{
2255+ struct mt76_testmode_data *td = &phy->test;
2256+ struct mt76_testmode_entry_data *ed;
2257+ struct mt76_queue *q;
2258+ int qid;
2259+ u16 tx_queued_limit;
2260+ u32 remain;
2261+ bool is_mu;
2262+
2263+ if (!td->tx_pending)
2264+ return;
2265+
2266+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2267+ tx_queued_limit = 100;
2268+
2269+ if (!td->aid) {
2270+ qid = skb_get_queue_mapping(td->tx_skb);
2271+ q = phy->q_tx[qid];
2272+ spin_lock_bh(&q->lock);
2273+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developerf1b69ea2022-07-04 10:54:39 +08002274+ td->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002275+ spin_unlock_bh(&q->lock);
2276+
2277+ return;
2278+ }
2279+
2280+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2281+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2282+ qid = skb_get_queue_mapping(ed->tx_skb);
2283+ q = phy->q_tx[qid];
2284+
2285+ spin_lock_bh(&q->lock);
2286+
2287+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2288+ if (remain < tx_queued_limit)
2289+ tx_queued_limit = remain;
2290+
developerf1b69ea2022-07-04 10:54:39 +08002291+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002292+
2293+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2294+ td->cur_entry = list_next_entry(td->cur_entry, list);
2295
2296 spin_unlock_bh(&q->lock);
2297+
2298+ if (is_mu && td->tx_pending)
2299+ mt76_worker_schedule(&phy->dev->tx_worker);
2300 }
2301
2302 static u32
developere9954402022-07-12 10:15:11 -07002303@@ -89,15 +129,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer4c6b6002022-05-30 16:36:44 +08002304 }
2305
2306 static void
2307-mt76_testmode_free_skb(struct mt76_phy *phy)
2308+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2309+{
2310+ if (!(*tx_skb))
2311+ return;
2312+
2313+ dev_kfree_skb(*tx_skb);
2314+ *tx_skb = NULL;
2315+}
2316+
2317+static void
2318+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2319 {
2320 struct mt76_testmode_data *td = &phy->test;
2321+ struct mt76_testmode_entry_data *ed = &td->ed;
2322+ struct mt76_wcid *wcid;
2323+
2324+ mt76_testmode_free_skb(&ed->tx_skb);
2325
2326- dev_kfree_skb(td->tx_skb);
2327- td->tx_skb = NULL;
2328+ mt76_tm_for_each_entry(phy, wcid, ed)
2329+ mt76_testmode_free_skb(&ed->tx_skb);
2330 }
2331
2332-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2333+static int
2334+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2335+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2336 {
2337 #define MT_TXP_MAX_LEN 4095
2338 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developer1d9fede2022-08-29 15:24:07 +08002339@@ -118,7 +174,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002340 nfrags = len / MT_TXP_MAX_LEN;
2341 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2342
2343- if (len > IEEE80211_MAX_FRAME_LEN)
2344+ if (len > IEEE80211_MAX_FRAME_LEN ||
2345+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2346 fc |= IEEE80211_STYPE_QOS_DATA;
2347
2348 head = alloc_skb(head_len, GFP_KERNEL);
developer1d9fede2022-08-29 15:24:07 +08002349@@ -127,9 +184,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002350
developere9954402022-07-12 10:15:11 -07002351 hdr = __skb_put_zero(head, sizeof(*hdr));
developer4c6b6002022-05-30 16:36:44 +08002352 hdr->frame_control = cpu_to_le16(fc);
2353- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2354- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2355- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2356+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2357+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2358+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2359 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developere9954402022-07-12 10:15:11 -07002360 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2361 head_len - sizeof(*hdr));
developer1d9fede2022-08-29 15:24:07 +08002362@@ -153,7 +210,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002363
2364 frag = alloc_skb(frag_len, GFP_KERNEL);
2365 if (!frag) {
2366- mt76_testmode_free_skb(phy);
2367+ mt76_testmode_free_skb(tx_skb);
2368 dev_kfree_skb(head);
2369 return -ENOMEM;
2370 }
developer1d9fede2022-08-29 15:24:07 +08002371@@ -166,15 +223,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002372 frag_tail = &(*frag_tail)->next;
2373 }
2374
2375- mt76_testmode_free_skb(phy);
2376- td->tx_skb = head;
2377+ mt76_testmode_free_skb(tx_skb);
2378+ *tx_skb = head;
2379
2380 return 0;
2381 }
2382-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2383
2384-static int
2385-mt76_testmode_tx_init(struct mt76_phy *phy)
2386+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2387+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2388 {
2389 struct mt76_testmode_data *td = &phy->test;
2390 struct ieee80211_tx_info *info;
developer1d9fede2022-08-29 15:24:07 +08002391@@ -182,7 +238,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002392 u8 max_nss = hweight8(phy->antenna_mask);
2393 int ret;
2394
2395- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2396+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2397 if (ret)
2398 return ret;
2399
developer1d9fede2022-08-29 15:24:07 +08002400@@ -192,7 +248,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002401 if (td->tx_antenna_mask)
2402 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2403
2404- info = IEEE80211_SKB_CB(td->tx_skb);
2405+ info = IEEE80211_SKB_CB(*tx_skb);
2406 rate = &info->control.rates[0];
2407 rate->count = 1;
2408 rate->idx = td->tx_rate_idx;
developer1d9fede2022-08-29 15:24:07 +08002409@@ -264,6 +320,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002410 out:
2411 return 0;
2412 }
2413+EXPORT_SYMBOL(mt76_testmode_init_skb);
2414+
2415+static int
2416+mt76_testmode_tx_init(struct mt76_phy *phy)
2417+{
2418+ struct mt76_testmode_entry_data *ed;
2419+ struct mt76_wcid *wcid;
2420+
2421+ mt76_tm_for_each_entry(phy, wcid, ed) {
2422+ int ret;
2423+
2424+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2425+ &ed->tx_skb, ed->addr);
2426+ if (ret)
2427+ return ret;
2428+ }
2429+
2430+ return 0;
2431+}
2432
2433 static void
2434 mt76_testmode_tx_start(struct mt76_phy *phy)
developer1d9fede2022-08-29 15:24:07 +08002435@@ -274,6 +349,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002436 td->tx_queued = 0;
2437 td->tx_done = 0;
2438 td->tx_pending = td->tx_count;
2439+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2440+ td->tx_pending = 1;
2441+ if (td->entry_num) {
2442+ td->tx_pending *= td->entry_num;
2443+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2444+ struct mt76_wcid, list);
2445+ }
2446+
2447 mt76_worker_schedule(&dev->tx_worker);
2448 }
2449
developer1d9fede2022-08-29 15:24:07 +08002450@@ -292,7 +375,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002451 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2452 MT76_TM_TIMEOUT * HZ);
2453
2454- mt76_testmode_free_skb(phy);
2455+ mt76_testmode_free_skb_all(phy);
2456 }
2457
2458 static inline void
developer1d9fede2022-08-29 15:24:07 +08002459@@ -323,6 +406,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002460 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2461 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2462 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2463+
2464+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2465 }
2466
2467 static int
developer1d9fede2022-08-29 15:24:07 +08002468@@ -332,8 +417,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer4c6b6002022-05-30 16:36:44 +08002469 struct mt76_dev *dev = phy->dev;
2470 int err;
2471
2472- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2473+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2474+ /* MU needs to clean hwq for free done event */
2475+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2476+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2477 mt76_testmode_tx_stop(phy);
2478+ }
2479
2480 if (state == MT76_TM_STATE_TX_FRAMES) {
2481 err = mt76_testmode_tx_init(phy);
developer1d9fede2022-08-29 15:24:07 +08002482@@ -403,6 +492,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer4c6b6002022-05-30 16:36:44 +08002483 return 0;
2484 }
2485
2486+static int
2487+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2488+{
2489+ struct mt76_dev *dev = phy->dev;
2490+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2491+ u32 offset = 0;
2492+ int err = -EINVAL;
2493+
2494+ if (!dev->test_ops->set_eeprom)
2495+ return -EOPNOTSUPP;
2496+
2497+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2498+ 0, MT76_TM_EEPROM_ACTION_MAX))
2499+ goto out;
2500+
2501+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2502+ struct nlattr *cur;
2503+ int rem, idx = 0;
2504+
2505+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2506+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2507+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2508+ goto out;
2509+
2510+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2511+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2512+ goto out;
2513+
2514+ val[idx++] = nla_get_u8(cur);
2515+ }
2516+ }
2517+
2518+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2519+
2520+out:
2521+ return err;
2522+}
2523+
2524 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2525 void *data, int len)
2526 {
developer1d9fede2022-08-29 15:24:07 +08002527@@ -426,6 +553,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002528
2529 mutex_lock(&dev->mutex);
2530
2531+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2532+ err = mt76_testmode_set_eeprom(phy, tb);
2533+ goto out;
2534+ }
2535+
2536 if (tb[MT76_TM_ATTR_RESET]) {
2537 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2538 memset(td, 0, sizeof(*td));
developer5ce5ea42022-08-31 14:12:29 +08002539@@ -452,7 +584,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002540 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2541 &td->tx_duty_cycle, 0, 99) ||
2542 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2543- &td->tx_power_control, 0, 1))
2544+ &td->tx_power_control, 0, 1) ||
2545+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2546+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2547+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2548 goto out;
2549
2550 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developer5ce5ea42022-08-31 14:12:29 +08002551@@ -484,8 +619,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002552
2553 if (tb[MT76_TM_ATTR_TX_POWER]) {
2554 struct nlattr *cur;
2555- int idx = 0;
2556- int rem;
2557+ int rem, idx = 0;
2558
2559 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2560 if (nla_len(cur) != 1 ||
developer5ce5ea42022-08-31 14:12:29 +08002561@@ -505,11 +639,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002562 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2563 goto out;
2564
2565- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2566+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2567+ }
2568+ }
2569+
2570+ if (tb[MT76_TM_ATTR_CFG]) {
2571+ struct nlattr *cur;
2572+ int rem, idx = 0;
2573+
2574+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2575+ if (nla_len(cur) != 1 || idx >= 2)
2576+ goto out;
2577+
2578+ if (idx == 0)
2579+ td->cfg.type = nla_get_u8(cur);
2580+ else
2581+ td->cfg.enable = nla_get_u8(cur);
2582 idx++;
2583 }
2584 }
2585
2586+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2587+ struct nlattr *cur;
2588+ int rem, idx = 0;
2589+
2590+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2591+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2592+ 0, MT76_TM_TXBF_ACT_MAX))
2593+ goto out;
2594+
2595+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2596+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2597+ if (nla_len(cur) != 2 ||
2598+ idx >= ARRAY_SIZE(td->txbf_param))
2599+ goto out;
2600+
2601+ td->txbf_param[idx++] = nla_get_u16(cur);
2602+ }
2603+ }
2604+
2605 if (dev->test_ops->set_params) {
2606 err = dev->test_ops->set_params(phy, tb, state);
2607 if (err)
developer5ce5ea42022-08-31 14:12:29 +08002608@@ -574,6 +742,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002609 struct mt76_phy *phy = hw->priv;
2610 struct mt76_dev *dev = phy->dev;
2611 struct mt76_testmode_data *td = &phy->test;
2612+ struct mt76_testmode_entry_data *ed = &td->ed;
2613 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2614 int err = 0;
2615 void *a;
developer5ce5ea42022-08-31 14:12:29 +08002616@@ -606,6 +775,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002617 goto out;
2618 }
2619
2620+ if (tb[MT76_TM_ATTR_AID]) {
2621+ struct mt76_wcid *wcid;
2622+ u8 aid;
2623+
2624+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2625+ if (err)
2626+ goto out;
2627+
2628+ mt76_tm_for_each_entry(phy, wcid, ed)
2629+ if (ed->aid == aid)
2630+ ed = mt76_testmode_entry_data(phy, wcid);
2631+ }
2632+
2633 mt76_testmode_init_defaults(phy);
2634
2635 err = -EMSGSIZE;
developer5ce5ea42022-08-31 14:12:29 +08002636@@ -618,12 +800,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002637 goto out;
2638
2639 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2640- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2641 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2642- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2643- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2644 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2645- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2646 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2647 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2648 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developer5ce5ea42022-08-31 14:12:29 +08002649@@ -643,6 +821,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002650 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2651 goto out;
2652
2653+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2654+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2655+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2656+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2657+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2658+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2659+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2660+ goto out;
2661+
2662 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2663 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2664 if (!a)
2665diff --git a/testmode.h b/testmode.h
developer20747c12022-09-16 14:09:40 +08002666index 89613266..57949f2b 100644
developer4c6b6002022-05-30 16:36:44 +08002667--- a/testmode.h
2668+++ b/testmode.h
2669@@ -6,6 +6,8 @@
2670 #define __MT76_TESTMODE_H
2671
2672 #define MT76_TM_TIMEOUT 10
2673+#define MT76_TM_MAX_ENTRY_NUM 16
2674+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2675
2676 /**
2677 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2678@@ -47,6 +49,15 @@
2679 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2680 *
2681 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2682+ *
2683+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2684+ * (u8, see &enum mt76_testmode_eeprom_action)
2685+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2686+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2687+ * (nested, u8 attrs)
2688+ *
2689+ * @MT76_TM_ATTR_CFG: testmode rf feature config (nested, see &enum mt76_testmode_cfg)
2690+ *
2691 */
2692 enum mt76_testmode_attr {
2693 MT76_TM_ATTR_UNSPEC,
2694@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2695 MT76_TM_ATTR_DRV_DATA,
2696
2697 MT76_TM_ATTR_MAC_ADDRS,
2698+ MT76_TM_ATTR_AID,
2699+ MT76_TM_ATTR_RU_ALLOC,
2700+ MT76_TM_ATTR_RU_IDX,
2701+
2702+ MT76_TM_ATTR_EEPROM_ACTION,
2703+ MT76_TM_ATTR_EEPROM_OFFSET,
2704+ MT76_TM_ATTR_EEPROM_VAL,
2705+
2706+ MT76_TM_ATTR_CFG,
2707+ MT76_TM_ATTR_TXBF_ACT,
2708+ MT76_TM_ATTR_TXBF_PARAM,
2709
2710 /* keep last */
2711 NUM_MT76_TM_ATTRS,
2712@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2713
2714 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2715
2716+/**
2717+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2718+ *
2719+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values in a specific
2720+ * eeprom data block
2721+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2722+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2723+ */
2724+enum mt76_testmode_eeprom_action {
2725+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2726+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2727+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2728+
2729+ /* keep last */
2730+ NUM_MT76_TM_EEPROM_ACTION,
2731+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2732+};
2733+
2734+/**
2735+ * enum mt76_testmode_cfg - testmode rf feature config type
2736+ *
2737+ * @MT76_TM_CFG_TSSI: enable/disable TSSI (transmit signal strength indication)
2738+ * @MT76_TM_CFG_DPD: enable/disable DPD (digital pre-distortion)
2739+ * @MT76_TM_CFG_RATE_POWER_OFFSET: enable/disable per-rate tx power offset
2740+ * @MT76_TM_CFG_THERMAL_COMP: enable/disable thermal compensation
2741+ */
2742+enum mt76_testmode_cfg {
2743+ MT76_TM_CFG_TSSI,
2744+ MT76_TM_CFG_DPD,
2745+ MT76_TM_CFG_RATE_POWER_OFFSET,
2746+ MT76_TM_CFG_THERMAL_COMP,
2747+
2748+ /* keep last */
2749+ NUM_MT76_TM_CFG,
2750+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2751+};
2752+
2753+enum mt76_testmode_txbf_act {
2754+ MT76_TM_TXBF_ACT_INIT,
2755+ MT76_TM_TXBF_ACT_UPDATE_CH,
2756+ MT76_TM_TXBF_ACT_PHASE_COMP,
2757+ MT76_TM_TXBF_ACT_TX_PREP,
2758+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2759+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2760+ MT76_TM_TXBF_ACT_PHASE_CAL,
2761+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2762+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2763+
2764+ /* keep last */
2765+ NUM_MT76_TM_TXBF_ACT,
2766+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2767+};
2768+
2769 #endif
2770diff --git a/tools/fields.c b/tools/fields.c
developer20747c12022-09-16 14:09:40 +08002771index e3f69089..6e36ab27 100644
developer4c6b6002022-05-30 16:36:44 +08002772--- a/tools/fields.c
2773+++ b/tools/fields.c
2774@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2775 [MT76_TM_STATE_IDLE] = "idle",
2776 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2777 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2778+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2779 };
2780
2781 static const char * const testmode_tx_mode[] = {
2782@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2783 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2784 }
2785
2786+static bool parse_mac(const struct tm_field *field, int idx,
2787+ struct nl_msg *msg, const char *val)
2788+{
2789+#define ETH_ALEN 6
2790+ bool ret = true;
2791+ char *str, *cur, *ap;
2792+ void *a;
2793+
2794+ ap = str = strdup(val);
2795+
2796+ a = nla_nest_start(msg, idx);
2797+
2798+ idx = 0;
2799+ while ((cur = strsep(&ap, ",")) != NULL) {
2800+ unsigned char addr[ETH_ALEN];
2801+ char *val, *tmp = cur;
2802+ int i = 0;
2803+
2804+ while ((val = strsep(&tmp, ":")) != NULL) {
2805+ if (i >= ETH_ALEN)
2806+ break;
2807+
2808+ addr[i++] = strtoul(val, NULL, 16);
2809+ }
2810+
2811+ nla_put(msg, idx, ETH_ALEN, addr);
2812+
2813+ idx++;
2814+ }
2815+
2816+ nla_nest_end(msg, a);
2817+
2818+ free(str);
2819+
2820+ return ret;
2821+}
2822+
2823+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2824+{
2825+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2826+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2827+ unsigned char addr[3][6];
2828+ struct nlattr *cur;
2829+ int idx = 0;
2830+ int rem;
2831+
2832+ nla_for_each_nested(cur, attr, rem) {
2833+ if (nla_len(cur) != 6)
2834+ continue;
2835+ memcpy(addr[idx++], nla_data(cur), 6);
2836+ }
2837+
2838+ printf("" MACSTR "," MACSTR "," MACSTR "",
2839+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2840+
2841+ return;
2842+}
2843
2844 #define FIELD_GENERIC(_field, _name, ...) \
2845 [FIELD_NAME(_field)] = { \
2846@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2847 ##__VA_ARGS__ \
2848 )
2849
2850+#define FIELD_MAC(_field, _name) \
2851+ [FIELD_NAME(_field)] = { \
2852+ .name = _name, \
2853+ .parse = parse_mac, \
2854+ .print = print_mac \
2855+ }
2856+
2857 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2858 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2859 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2860@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2861 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2862 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2863 FIELD(u8, TX_LTF, "tx_ltf"),
2864+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2865+ FIELD(u32, TX_IPG, "tx_ipg"),
2866+ FIELD(u32, TX_TIME, "tx_time"),
2867 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2868 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2869 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2870+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2871 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2872+ FIELD(u8, AID, "aid"),
2873+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2874+ FIELD(u8, RU_IDX, "ru_idx"),
2875+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2876 FIELD_NESTED_RO(STATS, stats, "",
2877 .print_extra = print_extra_stats),
2878 };
2879@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2880 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2881 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2882 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2883+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2884+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2885+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2886 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2887 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2888+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2889 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2890+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2891+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2892+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2893 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2894 };
2895
2896diff --git a/tx.c b/tx.c
developer20747c12022-09-16 14:09:40 +08002897index 65e2b7c1..8b33186b 100644
developer4c6b6002022-05-30 16:36:44 +08002898--- a/tx.c
2899+++ b/tx.c
2900@@ -245,8 +245,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
2901 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2902 struct mt76_phy *phy = hw->priv;
2903
2904- if (skb == phy->test.tx_skb)
2905- phy->test.tx_done++;
2906+ phy->test.tx_done++;
2907 if (phy->test.tx_queued == phy->test.tx_done)
2908 wake_up(&dev->tx_wait);
2909
2910--
developer20747c12022-09-16 14:09:40 +080029112.25.1
developer4c6b6002022-05-30 16:36:44 +08002912
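Note (not part of the patch itself): the duty_cycle/tx_time/ipg handling added to mt7915_tm_set_tx_frames() derives whichever of the three parameters was left unset: given duty_cycle and tx_time it computes the inter-packet gap, and given duty_cycle and ipg it computes the tx time. Below is a minimal standalone sketch of that arithmetic, assuming microsecond units for tx_time/ipg and a percentage duty cycle; the sample values are illustrative only.

    #include <stdio.h>

    int main(void)
    {
            unsigned int duty_cycle = 40;   /* percent */
            unsigned int tx_time = 600;     /* us */
            unsigned int ipg = 0;           /* us */

            /* same derivation as the hunk in mt7915_tm_set_tx_frames() */
            if (duty_cycle && tx_time && !ipg)
                    ipg = tx_time * 100 / duty_cycle - tx_time;
            else if (duty_cycle && !tx_time && ipg && duty_cycle < 100)
                    tx_time = duty_cycle * ipg / (100 - duty_cycle);

            /* 600 / (600 + 900) == 40% duty cycle, so this prints ipg=900 */
            printf("ipg=%u tx_time=%u\n", ipg, tx_time);
            return 0;
    }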