From 2da3e0606e1d6f6f4a31737bd0a2e154d92f3da6 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1112/1130] mt76: testmode: additional supports

Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 ++++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   37 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   10 +-
 mt7915/mcu.h      |   28 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   14 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1172 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++
 testmode.c        |  275 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   80 ++++
 tx.c              |    3 +-
 19 files changed, 1963 insertions(+), 147 deletions(-)

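Note for reviewers: the per-station test entries introduced in the mt76.h hunk below are walked with the new mt76_tm_for_each_entry() iterator. A minimal, illustrative sketch of a caller (the macro, struct mt76_testmode_entry_data and its fields come from this patch; dump_tm_entries() itself is hypothetical and not part of the series):

/* Illustrative only (CONFIG_NL80211_TESTMODE builds): walk the testmode
 * entry list added by this patch and print each entry's rate settings.
 */
static void dump_tm_entries(struct mt76_phy *phy)
{
	struct mt76_testmode_entry_data *ed;
	struct mt76_wcid *wcid;

	mt76_tm_for_each_entry(phy, wcid, ed)
		pr_info("aid=%u mcs=%u nss=%u ldpc=%u\n",
			ed->aid, ed->tx_rate_idx, ed->tx_rate_nss,
			ed->tx_rate_ldpc);
}
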
diff --git a/dma.c b/dma.c
index fc24b353..dc8d8882 100644
--- a/dma.c
+++ b/dma.c
@@ -539,8 +539,7 @@ free:
 if (mt76_is_testmode_skb(dev, skb, &hw)) {
35 struct mt76_phy *phy = hw->priv;
36
37- if (tx_info.skb == phy->test.tx_skb)
38- phy->test.tx_done--;
39+ phy->test.tx_done--;
40 }
41 #endif
42
diff --git a/mac80211.c b/mac80211.c
index 19d9efb3..2e0e011d 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 CHAN5G(60, 5300),
49 CHAN5G(64, 5320),
50
51+ CHAN5G(68, 5340),
52+ CHAN5G(80, 5400),
53+ CHAN5G(84, 5420),
54+ CHAN5G(88, 5440),
55+ CHAN5G(92, 5460),
56+ CHAN5G(96, 5480),
57+
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
61@@ -75,6 +82,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
62 CHAN5G(165, 5825),
63 CHAN5G(169, 5845),
64 CHAN5G(173, 5865),
65+
66+ CHAN5G(184, 4920),
67+ CHAN5G(188, 4940),
68+ CHAN5G(192, 4960),
69+ CHAN5G(196, 4980),
70 };
71
72 static const struct ieee80211_channel mt76_channels_6ghz[] = {
diff --git a/mt76.h b/mt76.h
index 4822ffb7..58258e19 100644
--- a/mt76.h
+++ b/mt76.h
@@ -635,6 +635,21 @@ struct mt76_testmode_ops {
 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
79 enum mt76_testmode_state new_state);
80 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
81+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
82+};
83+
84+struct mt76_testmode_entry_data {
85+ struct sk_buff *tx_skb;
86+
87+ u16 tx_mpdu_len;
88+ u8 tx_rate_idx;
89+ u8 tx_rate_nss;
90+ u8 tx_rate_ldpc;
91+
92+ u8 addr[3][ETH_ALEN];
93+ u8 aid;
94+ u8 ru_alloc;
95+ u8 ru_idx;
96 };
97
98 #define MT_TM_FW_RX_COUNT BIT(0)
developer144824b2022-11-25 21:27:43 +080099@@ -643,16 +658,11 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800100 enum mt76_testmode_state state;
101
102 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
103- struct sk_buff *tx_skb;
104
105 u32 tx_count;
106- u16 tx_mpdu_len;
107
108 u8 tx_rate_mode;
109- u8 tx_rate_idx;
110- u8 tx_rate_nss;
111 u8 tx_rate_sgi;
112- u8 tx_rate_ldpc;
113 u8 tx_rate_stbc;
114 u8 tx_ltf;
115
developer144824b2022-11-25 21:27:43 +0800116@@ -668,10 +678,37 @@ struct mt76_testmode_data {
developer4c6b6002022-05-30 16:36:44 +0800117 u8 tx_power[4];
118 u8 tx_power_control;
119
120- u8 addr[3][ETH_ALEN];
121+ struct list_head tm_entry_list;
122+ struct mt76_wcid *cur_entry;
123+ u8 entry_num;
124+ union {
125+ struct mt76_testmode_entry_data ed;
126+ struct {
127+ /* must be the same as mt76_testmode_entry_data */
128+ struct sk_buff *tx_skb;
129+
130+ u16 tx_mpdu_len;
131+ u8 tx_rate_idx;
132+ u8 tx_rate_nss;
133+ u8 tx_rate_ldpc;
134+
135+ u8 addr[3][ETH_ALEN];
136+ u8 aid;
137+ u8 ru_alloc;
138+ u8 ru_idx;
139+ };
140+ };
141
142 u8 flag;
143
144+ struct {
145+ u8 type;
146+ u8 enable;
147+ } cfg;
148+
149+ u8 txbf_act;
150+ u16 txbf_param[8];
151+
152 u32 tx_pending;
153 u32 tx_queued;
154 u16 tx_queued_limit;
developereb6a0182022-12-12 18:53:32 +0800155@@ -1135,6 +1172,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +0800156 #endif
157 }
158
159+#ifdef CONFIG_NL80211_TESTMODE
160+static inline struct mt76_wcid *
161+mt76_testmode_first_entry(struct mt76_phy *phy)
162+{
163+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
164+ return &phy->dev->global_wcid;
165+
166+ return list_first_entry(&phy->test.tm_entry_list,
167+ typeof(struct mt76_wcid),
168+ list);
169+}
170+
171+static inline struct mt76_testmode_entry_data *
172+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
173+{
174+ if (!wcid)
175+ return NULL;
176+ if (wcid == &phy->dev->global_wcid)
177+ return &phy->test.ed;
178+
179+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
180+ phy->hw->sta_data_size);
181+}
182+
183+#define mt76_tm_for_each_entry(phy, wcid, ed) \
184+ for (wcid = mt76_testmode_first_entry(phy), \
185+ ed = mt76_testmode_entry_data(phy, wcid); \
186+ ((phy->test.aid && \
187+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
188+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
189+ wcid = list_next_entry(wcid, list), \
190+ ed = mt76_testmode_entry_data(phy, wcid))
191+#endif
192+
193+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
194+ struct sk_buff *skb)
195+{
196+#ifdef CONFIG_NL80211_TESTMODE
197+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
198+ struct mt76_wcid *wcid;
199+
200+ if (skb == ed->tx_skb)
201+ return true;
202+
203+ mt76_tm_for_each_entry(phy, wcid, ed)
204+ if (skb == ed->tx_skb)
205+ return true;
206+ return false;
207+#else
208+ return false;
209+#endif
210+}
211+
212 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
213 struct sk_buff *skb,
214 struct ieee80211_hw **hw)
developereb6a0182022-12-12 18:53:32 +0800215@@ -1145,7 +1235,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
developer1d9fede2022-08-29 15:24:07 +0800216 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
217 struct mt76_phy *phy = dev->phys[i];
218
219- if (phy && skb == phy->test.tx_skb) {
220+ if (phy && mt76_testmode_enabled(phy) &&
221+ __mt76_is_testmode_skb(phy, skb)) {
222 *hw = dev->phys[i]->hw;
223 return true;
224 }
@@ -1247,7 +1338,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
227 struct netlink_callback *cb, void *data, int len);
228 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
229-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
230+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
231+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
232
233 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
234 {
diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
index cf6ec55f..291a7c12 100644
--- a/mt76_connac_mcu.c
+++ b/mt76_connac_mcu.c
@@ -394,6 +394,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
 switch (vif->type) {
241 case NL80211_IFTYPE_MESH_POINT:
242 case NL80211_IFTYPE_AP:
243+ case NL80211_IFTYPE_MONITOR:
244 if (vif->p2p)
245 conn_type = CONNECTION_P2P_GC;
246 else
@@ -575,6 +576,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
 rx->rca2 = 1;
249 rx->rv = 1;
250
251+ if (vif->type == NL80211_IFTYPE_MONITOR)
252+ rx->rca1 = 0;
253+
254 if (!is_connac_v1(dev))
255 return;
256
diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
index 18d6c669..a8ce98c9 100644
--- a/mt76_connac_mcu.h
+++ b/mt76_connac_mcu.h
@@ -994,6 +994,7 @@ enum {
 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
263 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
264 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
265+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
266 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
267 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
268 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
@@ -1195,6 +1196,7 @@ enum {
 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
271 /* for vendor csi and air monitor */
272 MCU_EXT_CMD_SMESH_CTRL = 0xae,
273+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
274 MCU_EXT_CMD_CERT_CFG = 0xb7,
275 MCU_EXT_CMD_CSI_CTRL = 0xc2,
276 };
diff --git a/mt7915/init.c b/mt7915/init.c
index 00ddab78..e044f503 100644
--- a/mt7915/init.c
+++ b/mt7915/init.c
@@ -687,7 +687,7 @@ static void mt7915_init_work(struct work_struct *work)
 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
283 init_work);
284
285- mt7915_mcu_set_eeprom(dev);
286+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
287 mt7915_mac_init(dev);
288 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
289 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
diff --git a/mt7915/mac.c b/mt7915/mac.c
index fc7b7903..0746e4b3 100644
--- a/mt7915/mac.c
+++ b/mt7915/mac.c
@@ -628,16 +628,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
 {
296 #ifdef CONFIG_NL80211_TESTMODE
297 struct mt76_testmode_data *td = &phy->mt76->test;
298+ struct mt76_testmode_entry_data *ed;
299+ struct mt76_wcid *wcid;
300 const struct ieee80211_rate *r;
301- u8 bw, mode, nss = td->tx_rate_nss;
302- u8 rate_idx = td->tx_rate_idx;
303+ u8 bw, mode, nss, rate_idx, ldpc;
304 u16 rateval = 0;
305 u32 val;
306 bool cck = false;
307 int band;
308
309- if (skb != phy->mt76->test.tx_skb)
310+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
311+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
312+ phy->test.spe_idx));
313+
314+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
315+ txwi[1] |= cpu_to_le32(BIT(18));
316+ txwi[2] = 0;
317+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
318+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
319+
 return;
+ }
322+
323+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
324+ if (ed->tx_skb == skb)
325+ break;
326+
327+ if (!ed)
+ return;
+
+ nss = ed->tx_rate_nss;
+ rate_idx = ed->tx_rate_idx;
+ ldpc = ed->tx_rate_ldpc;
 
 switch (td->tx_rate_mode) {
335 case MT76_TM_TX_MODE_HT:
@@ -727,13 +749,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
 if (mode >= MT_PHY_TYPE_HE_SU)
338 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
339
340- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
341+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
342 val |= MT_TXD6_LDPC;
343
 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
+ if (phy->test.bf_en)
346+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
347+
348 txwi[6] |= cpu_to_le32(val);
349- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
350- phy->test.spe_idx));
351 #endif
352 }
353
@@ -1483,7 +1506,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
 goto out;
356
357 /* set the necessary init items */
358- ret = mt7915_mcu_set_eeprom(dev);
359+ ret = mt7915_mcu_set_eeprom(dev, dev->flash_mode);
360 if (ret)
361 goto out;
362
diff --git a/mt7915/main.c b/mt7915/main.c
index 2c4e3923..4c3d822e 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -238,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
 mvif->phy = phy;
 mvif->mt76.band_idx = phy->mt76->band_idx;
 
371- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
372+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
373 if (ext_phy)
374 mvif->mt76.wmm_idx += 2;
375
diff --git a/mt7915/mcu.c b/mt7915/mcu.c
index 7728e82e..85450ead 100644
--- a/mt7915/mcu.c
+++ b/mt7915/mcu.c
@@ -383,6 +383,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
 case MCU_EXT_EVENT_BCC_NOTIFY:
382 mt7915_mcu_rx_bcc_notify(dev, skb);
383 break;
384+#ifdef CONFIG_NL80211_TESTMODE
385+ case MCU_EXT_EVENT_BF_STATUS_READ:
386+ mt7915_tm_txbf_status_read(dev, skb);
387+ break;
388+#endif
389 default:
390 break;
391 }
@@ -414,6 +419,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
394 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
395 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
396+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
397 !rxd->seq)
398 mt7915_mcu_rx_unsolicited_event(dev, skb);
399 else
@@ -2798,14 +2804,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
 return 0;
402 }
403
404-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
405+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
406 {
407 struct mt7915_mcu_eeprom req = {
408 .buffer_mode = EE_MODE_EFUSE,
409 .format = EE_FORMAT_WHOLE,
410 };
411
412- if (dev->flash_mode)
413+ if (flash_mode)
414 return mt7915_mcu_set_eeprom_flash(dev);
415
416 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
diff --git a/mt7915/mcu.h b/mt7915/mcu.h
index 2e97db7b..4d86c7f1 100644
--- a/mt7915/mcu.h
+++ b/mt7915/mcu.h
@@ -8,10 +8,15 @@
 
423 enum {
424 MCU_ATE_SET_TRX = 0x1,
425+ MCU_ATE_SET_TSSI = 0x5,
426+ MCU_ATE_SET_DPD = 0x6,
427+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
428+ MCU_ATE_SET_THERMAL_COMP = 0x8,
429 MCU_ATE_SET_FREQ_OFFSET = 0xa,
430 MCU_ATE_SET_PHY_COUNT = 0x11,
431 MCU_ATE_SET_SLOT_TIME = 0x13,
432 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
433+ MCU_ATE_SET_MU_RX_AID = 0x1e,
434 };
435
 struct mt7915_mcu_thermal_ctrl {
@@ -471,6 +476,12 @@ enum {
 
439 enum {
440 MT_BF_SOUNDING_ON = 1,
441+ MT_BF_DATA_PACKET_APPLY = 2,
442+ MT_BF_PFMU_TAG_READ = 5,
443+ MT_BF_PFMU_TAG_WRITE = 6,
444+ MT_BF_PHASE_CAL = 14,
445+ MT_BF_IBF_PHASE_COMP = 15,
446+ MT_BF_PROFILE_WRITE_ALL = 17,
447 MT_BF_TYPE_UPDATE = 20,
448 MT_BF_MODULE_UPDATE = 25
449 };
@@ -717,10 +728,19 @@ struct mt7915_muru {
 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
 
 /* Common Config */
-#define MURU_COMM_PPDU_FMT BIT(0)
455-#define MURU_COMM_SCH_TYPE BIT(1)
456-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
-/* DL&UL User config*/
+/* #define MURU_COMM_PPDU_FMT BIT(0) */
+/* #define MURU_COMM_SCH_TYPE BIT(1) */
+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
+#define MURU_COMM_PPDU_FMT BIT(0)
462+#define MURU_COMM_SCH_TYPE BIT(1)
463+#define MURU_COMM_BAND BIT(2)
464+#define MURU_COMM_WMM BIT(3)
465+#define MURU_COMM_SPE_IDX BIT(4)
466+#define MURU_COMM_PROC_TYPE BIT(5)
467+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
468+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
+
470+/* DL&UL User config */
471 #define MURU_USER_CNT BIT(4)
472
473 enum {
diff --git a/mt7915/mmio.c b/mt7915/mmio.c
index 07de3cbd..26881fdc 100644
--- a/mt7915/mmio.c
+++ b/mt7915/mmio.c
@@ -132,6 +132,7 @@ static const u32 mt7915_offs[] = {
 [ARB_DRNGR0] = 0x194,
480 [ARB_SCR] = 0x080,
481 [RMAC_MIB_AIRTIME14] = 0x3b8,
482+ [AGG_AALCR0] = 0x048,
483 [AGG_AWSCR0] = 0x05c,
484 [AGG_PCR0] = 0x06c,
485 [AGG_ACR0] = 0x084,
@@ -207,6 +208,7 @@ static const u32 mt7916_offs[] = {
 [ARB_DRNGR0] = 0x1e0,
488 [ARB_SCR] = 0x000,
489 [RMAC_MIB_AIRTIME14] = 0x0398,
490+ [AGG_AALCR0] = 0x028,
491 [AGG_AWSCR0] = 0x030,
492 [AGG_PCR0] = 0x040,
493 [AGG_ACR0] = 0x054,
diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
index 9fdb80b5..4336f204 100644
--- a/mt7915/mt7915.h
+++ b/mt7915/mt7915.h
@@ -320,6 +320,9 @@ struct mt7915_phy {
 u8 last_snr;
500
501 u8 spe_idx;
502+
503+ bool bf_en;
504+ bool bf_ever_en;
505 } test;
506 #endif
507
@@ -419,6 +422,14 @@ struct mt7915_dev {
 void __iomem *dcm;
510 void __iomem *sku;
511
512+#ifdef CONFIG_NL80211_TESTMODE
513+ struct {
514+ void *txbf_phase_cal;
515+ void *txbf_pfmu_data;
516+ void *txbf_pfmu_tag;
517+ } test;
518+#endif
519+
520 #ifdef MTK_DEBUG
521 u16 wlan_idx;
522 struct {
@@ -590,7 +601,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
 struct ieee80211_vif *vif,
525 struct ieee80211_sta *sta,
526 void *data, u32 field);
527-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
528+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
529 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
530 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
531 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
@@ -628,6 +639,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
534 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
535 void mt7915_mcu_exit(struct mt7915_dev *dev);
536+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
537
538 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
539 {
diff --git a/mt7915/regs.h b/mt7915/regs.h
index d6a05f13..e8768488 100644
--- a/mt7915/regs.h
+++ b/mt7915/regs.h
@@ -62,6 +62,7 @@ enum offs_rev {
 ARB_DRNGR0,
546 ARB_SCR,
547 RMAC_MIB_AIRTIME14,
548+ AGG_AALCR0,
549 AGG_AWSCR0,
550 AGG_PCR0,
551 AGG_ACR0,
@@ -482,6 +483,8 @@ enum offs_rev {
 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
554 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
555
556+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
557+ (_n) * 4))
558 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
559 (_n) * 4))
560 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
diff --git a/mt7915/testmode.c b/mt7915/testmode.c
index 46939191..e0ba088f 100644
--- a/mt7915/testmode.c
+++ b/mt7915/testmode.c
565@@ -9,6 +9,9 @@
566 enum {
567 TM_CHANGED_TXPOWER,
568 TM_CHANGED_FREQ_OFFSET,
569+ TM_CHANGED_AID,
570+ TM_CHANGED_CFG,
571+ TM_CHANGED_TXBF_ACT,
572
573 /* must be last */
574 NUM_TM_CHANGED
575@@ -17,6 +20,9 @@ enum {
576 static const u8 tm_change_map[] = {
577 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
578 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
579+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
580+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
581+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
582 };
583
584 struct reg_band {
@@ -33,6 +39,38 @@ struct reg_band {
 #define TM_REG_MAX_ID 20
587 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
588
developerc6f56bb2022-06-14 18:36:30 +0800589+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
590+
developer4c6b6002022-05-30 16:36:44 +0800591+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
592+{
593+ static const u8 width_to_bw[] = {
594+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
595+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
596+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
597+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
598+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
599+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
600+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
601+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
602+ };
603+
604+ if (width >= ARRAY_SIZE(width_to_bw))
605+ return 0;
606+
607+ return width_to_bw[width];
608+}
609+
610+static void
611+mt7915_tm_update_channel(struct mt7915_phy *phy)
612+{
613+ mutex_unlock(&phy->dev->mt76.mutex);
614+ mt7915_set_channel(phy);
615+ mutex_lock(&phy->dev->mt76.mutex);
616+
617+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+
+ mt7915_tm_update_entry(phy);
+}
621
622 static int
623 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
 }
626
627 static int
628-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
629+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
630 {
631+ struct mt76_testmode_entry_data *ed;
632+ struct mt76_wcid *wcid;
633 struct mt7915_dev *dev = phy->dev;
634 struct mt7915_tm_cmd req = {
635 .testmode_en = 1,
636 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
637- .param.clean.wcid = wcid,
 .param.clean.band = phy->mt76->band_idx,
 };
640
641- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
642- sizeof(req), false);
643+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
644+ int ret;
645+
646+ req.param.clean.wcid = wcid->idx;
647+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
648+ &req, sizeof(req), false);
649+ if (ret)
650+ return ret;
651+ }
652+
653+ return 0;
654 }
655
656 static int
@@ -141,7 +189,7 @@ mt7915_tm_set_phy_count(struct mt7915_phy *phy, u8 control)
658 .testmode_en = 1,
659 .param_idx = MCU_ATE_SET_PHY_COUNT,
660 .param.cfg.enable = control,
661- .param.cfg.band = phy != &dev->phy,
662+ .param.cfg.band = phy->mt76->band_idx,
663 };
664
665 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
668 }
669
670+static int
671+mt7915_tm_set_cfg(struct mt7915_phy *phy)
672+{
673+ static const u8 cfg_cmd[] = {
674+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
675+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
676+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
677+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
678+ };
679+ struct mt76_testmode_data *td = &phy->mt76->test;
680+ struct mt7915_dev *dev = phy->dev;
681+ struct mt7915_tm_cmd req = {
682+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
683+ .param_idx = cfg_cmd[td->cfg.type],
684+ .param.cfg.enable = td->cfg.enable,
+ .param.cfg.band = phy->mt76->band_idx,
+ };
687+
688+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
689+ sizeof(req), false);
690+}
691+
692+static int
693+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
694+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
695+ u8 nc, bool ebf)
696+{
697+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
698+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
699+ struct mt7915_dev *dev = phy->dev;
700+ struct sk_buff *skb;
701+ struct sta_rec_bf *bf;
702+ struct tlv *tlv;
703+ u8 ndp_rate;
704+
705+ if (nr == 1)
706+ ndp_rate = 8;
707+ else if (nr == 2)
708+ ndp_rate = 16;
709+ else
710+ ndp_rate = 24;
711+
712+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
713+ &msta->wcid);
714+ if (IS_ERR(skb))
715+ return PTR_ERR(skb);
716+
717+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
718+ bf = (struct sta_rec_bf *)tlv;
719+
720+ bf->pfmu = cpu_to_le16(pfmu_idx);
721+ bf->sounding_phy = 1;
722+ bf->bf_cap = ebf;
723+ bf->ncol = nc;
724+ bf->nrow = nr;
725+ bf->ndp_rate = ndp_rate;
726+ bf->ibf_timeout = 0xff;
727+ bf->tx_mode = MT_PHY_TYPE_HT;
728+
729+ if (ebf) {
730+ bf->mem[0].row = 0;
731+ bf->mem[1].row = 1;
732+ bf->mem[2].row = 2;
733+ bf->mem[3].row = 3;
734+ } else {
735+ bf->mem[0].row = 4;
736+ bf->mem[1].row = 5;
737+ bf->mem[2].row = 6;
738+ bf->mem[3].row = 7;
739+ }
740+
741+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
742+ MCU_EXT_CMD(STA_REC_UPDATE), true);
743+}
744+
745+static int
746+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
747+{
748+ struct mt76_testmode_data *td = &phy->mt76->test;
749+ struct mt76_testmode_entry_data *ed;
750+ struct ieee80211_sband_iftype_data *sdata;
751+ struct ieee80211_supported_band *sband;
752+ struct ieee80211_sta *sta;
753+ struct mt7915_sta *msta;
754+ int tid, ret;
755+
756+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
757+ return -EINVAL;
758+
759+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
760+ sizeof(*ed), GFP_KERNEL);
761+ if (!sta)
762+ return -ENOMEM;
763+
764+ msta = (struct mt7915_sta *)sta->drv_priv;
765+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
766+ memcpy(ed, &td->ed, sizeof(*ed));
767+
768+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
769+ sband = &phy->mt76->sband_5g.sband;
770+ sdata = phy->iftype[NL80211_BAND_5GHZ];
771+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
772+ sband = &phy->mt76->sband_6g.sband;
773+ sdata = phy->iftype[NL80211_BAND_6GHZ];
774+ } else {
775+ sband = &phy->mt76->sband_2g.sband;
776+ sdata = phy->iftype[NL80211_BAND_2GHZ];
777+ }
778+
779+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
780+ if (phy->test.bf_en) {
781+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
782+
783+ memcpy(sta->addr, addr, ETH_ALEN);
784+ }
785+
786+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
+ memcpy(&sta->deflink.ht_cap, &sband->ht_cap, sizeof(sta->deflink.ht_cap));
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
+ memcpy(&sta->deflink.vht_cap, &sband->vht_cap, sizeof(sta->deflink.vht_cap));
+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
+ memcpy(&sta->deflink.he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
+ sizeof(sta->deflink.he_cap));
+ sta->aid = aid;
794+ sta->wme = 1;
795+
796+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
797+ if (ret) {
798+ kfree(sta);
799+ return ret;
800+ }
801+
802+ /* prevent from starting tx ba session */
803+ for (tid = 0; tid < 8; tid++)
804+ set_bit(tid, &msta->ampdu_state);
805+
806+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
807+ td->entry_num++;
808+
809+ return 0;
810+}
811+
812+static void
813+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
814+{
815+ struct mt76_testmode_data *td = &phy->mt76->test;
816+ struct mt76_wcid *wcid, *tmp;
817+
818+ if (list_empty(&td->tm_entry_list))
819+ return;
820+
821+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
+ struct mt76_testmode_entry_data *ed;
+ struct mt7915_dev *dev = phy->dev;
+ struct ieee80211_sta *sta;
+
+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
+ if (aid && ed->aid != aid)
+ continue;
+
+ sta = wcid_to_sta(wcid);
+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
832+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
833+
834+ list_del_init(&wcid->list);
835+ kfree(sta);
836+ phy->mt76->test.entry_num--;
837+ }
838+}
839+
840+static int
841+mt7915_tm_set_entry(struct mt7915_phy *phy)
842+{
843+ struct mt76_testmode_data *td = &phy->mt76->test;
844+ struct mt76_testmode_entry_data *ed;
845+ struct mt76_wcid *wcid;
846+
847+ if (!td->aid) {
848+ if (td->state > MT76_TM_STATE_IDLE)
849+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
850+ mt7915_tm_entry_remove(phy, td->aid);
851+ return 0;
852+ }
853+
854+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
855+ if (ed->aid == td->aid) {
856+ struct sk_buff *skb;
857+
858+ local_bh_disable();
859+ skb = ed->tx_skb;
860+ memcpy(ed, &td->ed, sizeof(*ed));
861+ ed->tx_skb = skb;
862+ local_bh_enable();
863+
864+ return 0;
865+ }
866+ }
867+
868+ return mt7915_tm_entry_add(phy, td->aid);
869+}
870+
+static void
872+mt7915_tm_update_entry(struct mt7915_phy *phy)
873+{
874+ struct mt76_testmode_data *td = &phy->mt76->test;
875+ struct mt76_testmode_entry_data *ed, tmp;
876+ struct mt76_wcid *wcid, *last;
877+
878+ if (!td->aid || phy->test.bf_en)
879+ return;
880+
881+ memcpy(&tmp, &td->ed, sizeof(tmp));
882+ last = list_last_entry(&td->tm_entry_list,
883+ struct mt76_wcid, list);
884+
885+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
886+ memcpy(&td->ed, ed, sizeof(td->ed));
887+ mt7915_tm_entry_remove(phy, td->aid);
888+ mt7915_tm_entry_add(phy, td->aid);
889+ if (wcid == last)
890+ break;
891+ }
892+
893+ memcpy(&td->ed, &tmp, sizeof(td->ed));
894+}
895+
+static int
897+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
898+{
899+ struct mt76_testmode_data *td = &phy->mt76->test;
900+ struct mt7915_dev *dev = phy->dev;
901+ bool enable = val[0];
902+ void *phase_cal, *pfmu_data, *pfmu_tag;
903+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
904+
905+ if (!enable) {
906+ phy->test.bf_en = 0;
907+ return 0;
908+ }
909+
910+ if (!dev->test.txbf_phase_cal) {
911+ phase_cal = devm_kzalloc(dev->mt76.dev,
912+ sizeof(struct mt7915_tm_txbf_phase) *
913+ MAX_PHASE_GROUP_NUM,
914+ GFP_KERNEL);
915+ if (!phase_cal)
916+ return -ENOMEM;
917+
918+ dev->test.txbf_phase_cal = phase_cal;
919+ }
920+
921+ if (!dev->test.txbf_pfmu_data) {
922+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
923+ if (!pfmu_data)
924+ return -ENOMEM;
925+
926+ dev->test.txbf_pfmu_data = pfmu_data;
927+ }
928+
929+ if (!dev->test.txbf_pfmu_tag) {
930+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
931+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
932+ if (!pfmu_tag)
933+ return -ENOMEM;
934+
935+ dev->test.txbf_pfmu_tag = pfmu_tag;
936+ }
937+
938+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
939+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
940+
941+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
942+ td->tx_mpdu_len = 1024;
943+ td->tx_rate_sgi = 0;
944+ td->tx_ipg = 100;
945+ phy->test.bf_en = 1;
946+
947+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
948+}
949+
950+static int
951+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
952+{
953+ struct mt7915_dev *dev = phy->dev;
954+ struct {
955+ u8 category;
956+ u8 wlan_idx_lo;
957+ u8 bw;
958+ u8 jp_band;
959+ u8 dbdc_idx;
960+ bool read_from_e2p;
961+ bool disable;
962+ u8 wlan_idx_hi;
963+ u8 buf[40];
964+ } __packed req = {
965+ .category = MT_BF_IBF_PHASE_COMP,
966+ .bw = val[0],
967+ .jp_band = (val[2] == 1) ? 1 : 0,
+ .dbdc_idx = phy->mt76->band_idx,
+ .read_from_e2p = val[3],
970+ .disable = val[4],
971+ };
972+ struct mt7915_tm_txbf_phase *phase =
973+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
974+
975+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
976+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
977+
978+ pr_info("ibf cal process: phase comp info\n");
979+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
980+ &req, sizeof(req), 0);
981+
982+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
983+ sizeof(req), true);
984+}
985+
986+static int
987+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
988+{
989+ struct mt7915_dev *dev = phy->dev;
990+ struct {
991+ u8 format_id;
992+ u8 pfmu_idx;
993+ bool bfer;
994+ u8 dbdc_idx;
995+ } __packed req = {
996+ .format_id = MT_BF_PFMU_TAG_READ,
997+ .pfmu_idx = pfmu_idx,
998+ .bfer = 1,
999+ .dbdc_idx = phy != &dev->phy,
1000+ };
1001+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1002+
1003+ tag->t1.pfmu_idx = 0;
1004+
1005+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1006+ sizeof(req), true);
1007+}
1008+
1009+static int
1010+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
1011+ struct mt7915_tm_pfmu_tag *tag)
1012+{
1013+ struct mt7915_dev *dev = phy->dev;
1014+ struct {
1015+ u8 format_id;
1016+ u8 pfmu_idx;
1017+ bool bfer;
1018+ u8 dbdc_idx;
1019+ u8 buf[64];
1020+ } __packed req = {
1021+ .format_id = MT_BF_PFMU_TAG_WRITE,
1022+ .pfmu_idx = pfmu_idx,
1023+ .bfer = 1,
1024+ .dbdc_idx = phy != &dev->phy,
1025+ };
1026+
1027+ memcpy(req.buf, tag, sizeof(*tag));
1028+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1029+
1030+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1031+ sizeof(req), false);
1032+}
1033+
1034+static int
1035+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1036+ bool ibf, bool phase_cal)
1037+{
1038+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1039+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1040+ struct mt7915_dev *dev = phy->dev;
1041+ struct {
1042+ u8 category;
1043+ u8 wlan_idx_lo;
1044+ bool ebf;
1045+ bool ibf;
1046+ bool mu_txbf;
1047+ bool phase_cal;
1048+ u8 wlan_idx_hi;
1049+ u8 _rsv;
1050+ } __packed req = {
1051+ .category = MT_BF_DATA_PACKET_APPLY,
1052+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1053+ .ebf = ebf,
1054+ .ibf = ibf,
1055+ .phase_cal = phase_cal,
1056+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1057+ };
1058+
1059+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1060+ sizeof(req), false);
1061+}
1062+
1063+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1064+ struct mt76_wcid *wcid)
1065+{
1066+ struct mt7915_dev *dev = phy->dev;
1067+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1068+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1069+ struct sta_phy rate = {};
1070+
1071+ if (!sta)
1072+ return 0;
1073+
1074+ rate.type = MT_PHY_TYPE_HT;
1075+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1076+ rate.nss = ed->tx_rate_nss;
1077+ rate.mcs = ed->tx_rate_idx;
1078+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1079+
1080+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1081+ &rate, RATE_PARAM_FIXED);
1082+}
1083+
1084+static int
1085+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1086+{
1087+ bool bf_on = val[0], update = val[3];
1088+ /* u16 wlan_idx = val[2]; */
1089+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1090+ struct mt76_testmode_data *td = &phy->mt76->test;
1091+ struct mt76_wcid *wcid;
1092+
1093+ if (bf_on) {
1094+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1095+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1096+ tag->t1.invalid_prof = false;
1097+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1098+
1099+ phy->test.bf_ever_en = true;
1100+
1101+ if (update)
1102+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1103+ } else {
1104+ if (!phy->test.bf_ever_en) {
1105+ if (update)
1106+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1107+ } else {
1108+ phy->test.bf_ever_en = false;
1109+
1110+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1111+ tag->t1.invalid_prof = true;
1112+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1113+ }
1114+ }
1115+
1116+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1117+ mt7915_tm_txbf_set_rate(phy, wcid);
1118+
1119+ return 0;
1120+}
1121+
1122+static int
1123+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1124+{
1125+ static const u8 mode_to_lm[] = {
1126+ [MT76_TM_TX_MODE_CCK] = 0,
1127+ [MT76_TM_TX_MODE_OFDM] = 0,
1128+ [MT76_TM_TX_MODE_HT] = 1,
1129+ [MT76_TM_TX_MODE_VHT] = 2,
1130+ [MT76_TM_TX_MODE_HE_SU] = 3,
1131+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1132+ [MT76_TM_TX_MODE_HE_TB] = 3,
1133+ [MT76_TM_TX_MODE_HE_MU] = 3,
1134+ };
1135+ struct mt76_testmode_data *td = &phy->mt76->test;
1136+ struct mt76_wcid *wcid;
1137+ struct ieee80211_vif *vif = phy->monitor_vif;
1138+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1139+ u8 pfmu_idx = val[0], nc = val[2], nr;
1140+ int ret;
1141+
1142+ if (td->tx_antenna_mask == 3)
1143+ nr = 1;
1144+ else if (td->tx_antenna_mask == 7)
1145+ nr = 2;
1146+ else
1147+ nr = 3;
1148+
1149+ memset(tag, 0, sizeof(*tag));
1150+ tag->t1.pfmu_idx = pfmu_idx;
1151+ tag->t1.ebf = ebf;
1152+ tag->t1.nr = nr;
1153+ tag->t1.nc = nc;
1154+ tag->t1.invalid_prof = true;
1155+
1156+ tag->t1.snr_sts4 = 0xc0;
1157+ tag->t1.snr_sts5 = 0xff;
1158+ tag->t1.snr_sts6 = 0xff;
1159+ tag->t1.snr_sts7 = 0xff;
1160+
1161+ if (ebf) {
1162+ tag->t1.row_id1 = 0;
1163+ tag->t1.row_id2 = 1;
1164+ tag->t1.row_id3 = 2;
1165+ tag->t1.row_id4 = 3;
1166+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1167+ } else {
1168+ tag->t1.row_id1 = 4;
1169+ tag->t1.row_id2 = 5;
1170+ tag->t1.row_id3 = 6;
1171+ tag->t1.row_id4 = 7;
1172+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1173+
1174+ tag->t2.ibf_timeout = 0xff;
1175+ tag->t2.ibf_nr = nr;
1176+ }
1177+
1178+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1179+ if (ret)
1180+ return ret;
1181+
1182+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1183+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1184+ if (ret)
1185+ return ret;
1186+
1187+ if (!ebf)
1188+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1189+
1190+ return 0;
1191+}
1192+
1193+static int
1194+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1195+{
1196+#define GROUP_L 0
1197+#define GROUP_M 1
1198+#define GROUP_H 2
1199+ struct mt7915_dev *dev = phy->dev;
1200+ struct {
1201+ u8 category;
1202+ u8 group_l_m_n;
1203+ u8 group;
1204+ bool sx2;
1205+ u8 cal_type;
1206+ u8 lna_gain_level;
1207+ u8 _rsv[2];
1208+ } __packed req = {
1209+ .category = MT_BF_PHASE_CAL,
1210+ .group = val[0],
1211+ .group_l_m_n = val[1],
1212+ .sx2 = val[2],
1213+ .cal_type = val[3],
1214+ .lna_gain_level = 0, /* for test purpose */
1215+ };
1216+ struct mt7915_tm_txbf_phase *phase =
1217+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1218+
1219+ phase[req.group].status = 0;
1220+
1221+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1222+ sizeof(req), true);
1223+}
1224+
1225+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1226+{
1227+#define BF_PFMU_TAG 16
1228+#define BF_CAL_PHASE 21
1229+ u8 format_id;
1230+
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+ format_id = *(u8 *)skb->data;
1233+
1234+ if (format_id == BF_PFMU_TAG) {
1235+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1236+
1237+ skb_pull(skb, 8);
1238+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1239+ } else if (format_id == BF_CAL_PHASE) {
1240+ struct mt7915_tm_ibf_cal_info *cal;
1241+ struct mt7915_tm_txbf_phase *phase =
1242+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1243+
1244+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1245+ switch (cal->cal_type) {
1246+ case IBF_PHASE_CAL_NORMAL:
1247+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1248+ if (cal->group_l_m_n != GROUP_M)
1249+ break;
1250+ phase = &phase[cal->group];
1251+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1252+ phase->status = cal->status;
1253+ break;
1254+ case IBF_PHASE_CAL_VERIFY:
1255+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1256+ break;
1257+ default:
1258+ break;
1259+ }
1260+ }
1261+
1262+ wake_up(&dev->mt76.tx_wait);
1263+
1264+ return 0;
1265+}
1266+
1267+static int
1268+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1269+{
1270+ struct mt76_testmode_data *td = &phy->mt76->test;
1271+ u16 pfmu_idx = val[0];
1272+ u16 subc_id = val[1];
1273+ u16 angle11 = val[2];
1274+ u16 angle21 = val[3];
1275+ u16 angle31 = val[4];
1276+ u16 angle41 = val[5];
1277+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1278+ struct mt7915_tm_pfmu_data *pfmu_data;
1279+
1280+ if (subc_id > 63)
1281+ return -EINVAL;
1282+
1283+ if (td->tx_antenna_mask == 2) {
1284+ phi11 = (s16)(angle21 - angle11);
1285+ } else if (td->tx_antenna_mask == 3) {
1286+ phi11 = (s16)(angle31 - angle11);
1287+ phi21 = (s16)(angle31 - angle21);
1288+ } else {
1289+ phi11 = (s16)(angle41 - angle11);
1290+ phi21 = (s16)(angle41 - angle21);
1291+ phi31 = (s16)(angle41 - angle31);
1292+ }
1293+
1294+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1295+ pfmu_data = &pfmu_data[subc_id];
1296+
1297+ if (subc_id < 32)
1298+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1299+ else
1300+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1301+ pfmu_data->phi11 = cpu_to_le16(phi11);
1302+ pfmu_data->phi21 = cpu_to_le16(phi21);
1303+ pfmu_data->phi31 = cpu_to_le16(phi31);
1304+
1305+ if (subc_id == 63) {
1306+ struct mt7915_dev *dev = phy->dev;
1307+ struct {
1308+ u8 format_id;
1309+ u8 pfmu_idx;
1310+ u8 dbdc_idx;
1311+ u8 _rsv;
1312+ u8 buf[512];
1313+ } __packed req = {
1314+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1315+ .pfmu_idx = pfmu_idx,
1316+ .dbdc_idx = phy != &dev->phy,
1317+ };
1318+
1319+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1320+
1321+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1322+ &req, sizeof(req), true);
1323+ }
1324+
1325+ return 0;
1326+}
1327+
1328+static int
1329+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1330+{
1331+ struct mt7915_tm_txbf_phase *phase, *p;
1332+ struct mt7915_dev *dev = phy->dev;
1333+ u8 *eeprom = dev->mt76.eeprom.data;
1334+ u16 offset;
1335+ bool is_7976;
1336+ int i;
1337+
1338+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1339+ offset = is_7976 ? 0x60a : 0x651;
1340+
1341+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1342+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1343+ p = &phase[i];
1344+
1345+ if (!p->status)
1346+ continue;
1347+
1348+ /* copy phase cal data to eeprom */
1349+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1350+ sizeof(p->phase));
1351+ }
1352+
1353+ return 0;
1354+}
1355+
1356+static int
1357+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1358+{
1359+ struct mt76_testmode_data *td = &phy->mt76->test;
1360+ u16 *val = td->txbf_param;
1361+
1362+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1363+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1364+
1365+ switch (td->txbf_act) {
1366+ case MT76_TM_TXBF_ACT_INIT:
1367+ return mt7915_tm_txbf_init(phy, val);
1368+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1369+ mt7915_tm_update_channel(phy);
1370+ break;
1371+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1372+ return mt7915_tm_txbf_phase_comp(phy, val);
1373+ case MT76_TM_TXBF_ACT_TX_PREP:
1374+ return mt7915_tm_txbf_set_tx(phy, val);
1375+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1376+ return mt7915_tm_txbf_profile_update(phy, val, false);
1377+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1378+ return mt7915_tm_txbf_profile_update(phy, val, true);
1379+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1380+ return mt7915_tm_txbf_phase_cal(phy, val);
1381+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1382+ return mt7915_tm_txbf_profile_update_all(phy, val);
1383+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1384+ return mt7915_tm_txbf_e2p_update(phy);
1385+ default:
1386+ break;
1387+ };
1388+
1389+ return 0;
1390+}
1391+
1392 static int
developerf64861f2022-06-22 11:44:53 +08001393 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer4c6b6002022-05-30 16:36:44 +08001394- u16 cw_max, u16 txop)
1395+ u16 cw_max, u16 txop, u8 tx_cmd)
1396 {
developerf64861f2022-06-22 11:44:53 +08001397 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer4c6b6002022-05-30 16:36:44 +08001398- struct mt7915_mcu_tx req = { .total = 1 };
1399+ struct mt7915_mcu_tx req = {
1400+ .valid = true,
1401+ .mode = tx_cmd,
1402+ .total = 1,
1403+ };
1404 struct edca *e = &req.edca[0];
1405
developerf64861f2022-06-22 11:44:53 +08001406 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
developereb6a0182022-12-12 18:53:32 +08001407@@ -263,7 +1037,8 @@ done:
developer4c6b6002022-05-30 16:36:44 +08001408
developerf64861f2022-06-22 11:44:53 +08001409 return mt7915_tm_set_wmm_qid(phy,
developer4c6b6002022-05-30 16:36:44 +08001410 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1411- aifsn, cw, cw, 0);
1412+ aifsn, cw, cw, 0,
1413+ mode == MT76_TM_TX_MODE_HE_MU);
1414 }
1415
1416 static int
@@ -339,7 +1114,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
 bitrate = cfg80211_calculate_bitrate(&rate);
1419 tx_len = bitrate * tx_time / 10 / 8;
1420
1421- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1422+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1423 if (ret)
1424 return ret;
1425
@@ -458,64 +1233,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
 
1428 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1429
1430- if (!en)
1431+ if (!en) {
1432 mt7915_tm_set_tam_arb(phy, en, 0);
1433+
1434+ phy->mt76->test.aid = 0;
1435+ phy->mt76->test.tx_mpdu_len = 0;
1436+ phy->test.bf_en = 0;
1437+ mt7915_tm_set_entry(phy);
1438+ }
1439+}
1440+
1441+static bool
1442+mt7915_tm_check_skb(struct mt7915_phy *phy)
1443+{
1444+ struct mt76_testmode_entry_data *ed;
1445+ struct mt76_wcid *wcid;
1446+
1447+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1448+ struct ieee80211_tx_info *info;
1449+
1450+ if (!ed->tx_skb)
1451+ return false;
1452+
1453+ info = IEEE80211_SKB_CB(ed->tx_skb);
1454+ info->control.vif = phy->monitor_vif;
1455+ }
1456+
1457+ return true;
1458+}
1459+
1460+static int
1461+mt7915_tm_set_ba(struct mt7915_phy *phy)
1462+{
1463+ struct mt7915_dev *dev = phy->dev;
1464+ struct mt76_testmode_data *td = &phy->mt76->test;
1465+ struct mt76_wcid *wcid;
1466+ struct ieee80211_vif *vif = phy->monitor_vif;
1467+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1468+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1469+
1470+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1471+ int tid, ret;
1472+
1473+ params.sta = wcid_to_sta(wcid);
1474+ for (tid = 0; tid < 8; tid++) {
1475+ params.tid = tid;
1476+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1477+ if (ret)
1478+ return ret;
1479+ }
1480+ }
1481+
1482+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1483+ 0x01010101);
1484+
1485+ return 0;
1486+}
1487+
1488+static int
1489+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1490+{
1491+/* #define MURU_SET_MANUAL_CFG 100 */
1492+ struct mt7915_dev *dev = phy->dev;
1493+ struct {
1494+ __le32 cmd;
1495+ struct mt7915_tm_muru muru;
1496+ } __packed req = {
1497+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1498+ };
1499+
1500+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1501+
1502+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1503+ sizeof(req), false);
1504+}
1505+
1506+static int
1507+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1508+{
1509+ struct mt76_testmode_data *td = &phy->mt76->test;
1510+ struct mt76_testmode_entry_data *ed;
1511+ struct mt76_wcid *wcid;
1512+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1513+ struct ieee80211_vif *vif = phy->monitor_vif;
1514+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1515+ struct mt7915_tm_muru muru = {};
1516+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1517+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1518+ int i;
1519+
1520+ comm->ppdu_format = MURU_PPDU_HE_MU;
1521+ comm->band = mvif->mt76.band_idx;
1522+ comm->wmm_idx = mvif->mt76.wmm_idx;
1523+ comm->spe_idx = phy->test.spe_idx;
1524+
1525+ dl->bw = mt7915_tm_chan_bw(chandef->width);
+ dl->gi = td->tx_rate_sgi;
1527+ dl->ltf = td->tx_ltf;
1528+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1529+
1530+ for (i = 0; i < sizeof(dl->ru); i++)
1531+ dl->ru[i] = 0x71;
1532+
1533+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1534+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1535+
1536+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1537+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1538+ dl_usr->ru_idx = ed->ru_idx;
1539+ dl_usr->mcs = ed->tx_rate_idx;
1540+ dl_usr->nss = ed->tx_rate_nss - 1;
1541+ dl_usr->ldpc = ed->tx_rate_ldpc;
1542+ dl->ru[dl->user_num] = ed->ru_alloc;
1543+
1544+ dl->user_num++;
1545+ }
1546+
1547+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1548+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1549+
1550+ return mt7915_tm_set_muru_cfg(phy, &muru);
1551+}
1552+
1553+static int
1554+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1555+{
1556+#define MURU_SET_TX_PKT_CNT 105
1557+#define MURU_SET_TX_EN 106
1558+ struct mt7915_dev *dev = phy->dev;
1559+ struct {
1560+ __le32 cmd;
1561+ u8 band;
1562+ u8 enable;
1563+ u8 _rsv[2];
1564+ __le32 tx_count;
1565+ } __packed req = {
+ .band = phy->mt76->band_idx,
+ .enable = enable,
1568+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1569+ };
1570+ int ret;
1571+
1572+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1573+ cpu_to_le32(MURU_SET_TX_EN);
1574+
1575+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1576+ sizeof(req), false);
1577+ if (ret)
1578+ return ret;
1579+
1580+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1581+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1582+
1583+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1584+ sizeof(req), false);
1585 }
1586
1587 static void
1588-mt7915_tm_update_channel(struct mt7915_phy *phy)
1589+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1590 {
1591- mutex_unlock(&phy->dev->mt76.mutex);
1592- mt7915_set_channel(phy);
1593- mutex_lock(&phy->dev->mt76.mutex);
1594+ struct mt76_testmode_data *td = &phy->mt76->test;
1595
1596- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1597+ if (enable) {
1598+ struct mt7915_dev *dev = phy->dev;
1599+
1600+ mt7915_tm_set_ba(phy);
1601+ mt7915_tm_set_muru_dl(phy);
1602+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1603+ } else {
1604+ /* set to zero for counting real tx free num */
1605+ td->tx_done = 0;
1606+ }
1607+
1608+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1609+ usleep_range(100000, 200000);
1610 }
1611
1612 static void
 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
 {
 struct mt76_testmode_data *td = &phy->mt76->test;
1616- struct mt7915_dev *dev = phy->dev;
1617- struct ieee80211_tx_info *info;
1618- u8 duty_cycle = td->tx_duty_cycle;
1619- u32 tx_time = td->tx_time;
1620- u32 ipg = td->tx_ipg;
1621
1622 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1623- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1624+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1625
1626 if (en) {
1627- mt7915_tm_update_channel(phy);
1628+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1629+ u8 duty_cycle = td->tx_duty_cycle;
1630+
1631+ if (!phy->test.bf_en)
1632+ mt7915_tm_update_channel(phy);
1633
 if (td->tx_spe_idx)
 phy->test.spe_idx = td->tx_spe_idx;
 else
 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
- }
1639
1640- mt7915_tm_set_tam_arb(phy, en,
1641- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1642+ /* if all three params are set, duty_cycle will be ignored */
1643+ if (duty_cycle && tx_time && !ipg) {
1644+ ipg = tx_time * 100 / duty_cycle - tx_time;
1645+ } else if (duty_cycle && !tx_time && ipg) {
1646+ if (duty_cycle < 100)
1647+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1648+ }
1649
1650- /* if all three params are set, duty_cycle will be ignored */
1651- if (duty_cycle && tx_time && !ipg) {
1652- ipg = tx_time * 100 / duty_cycle - tx_time;
1653- } else if (duty_cycle && !tx_time && ipg) {
1654- if (duty_cycle < 100)
1655- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1656- }
1657+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1658+ mt7915_tm_set_tx_len(phy, tx_time);
1659
1660- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1661- mt7915_tm_set_tx_len(phy, tx_time);
1662+ if (ipg)
1663+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1664
1665- if (ipg)
1666- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1667+ if (!mt7915_tm_check_skb(phy))
1668+ return;
1669+ } else {
1670+ mt7915_tm_clean_hwq(phy);
1671+ }
1672
1673- if (!en || !td->tx_skb)
1674- return;
1675+ mt7915_tm_set_tam_arb(phy, en,
1676+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1677
1678- info = IEEE80211_SKB_CB(td->tx_skb);
1679- info->control.vif = phy->monitor_vif;
1680+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1681+ mt7915_tm_tx_frames_mu(phy, en);
1682
1683 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1684 }
developereb6a0182022-12-12 18:53:32 +08001685@@ -544,10 +1482,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001686 return ret;
1687
1688 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1689- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1690- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1691- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1692- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1693
1694 if (!clear) {
developer1d9fede2022-08-29 15:24:07 +08001695 enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
developereb6a0182022-12-12 18:53:32 +08001696@@ -562,13 +1496,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001697 return 0;
1698 }
1699
1700+static int
1701+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1702+{
1703+ struct mt7915_dev *dev = phy->dev;
1704+ struct mt76_wcid *wcid = NULL;
1705+ struct mt76_testmode_entry_data *ed;
1706+ struct {
1707+ u8 band;
1708+ u8 _rsv;
1709+ __le16 wlan_idx;
1710+ } __packed req = {
developereb6a0182022-12-12 18:53:32 +08001711+ .band = phy->mt76->band_idx,
developer4c6b6002022-05-30 16:36:44 +08001712+ };
1713+
1714+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1715+ if (ed->aid == aid)
1716+ break;
1717+
1718+ if (!wcid)
1719+ return -EINVAL;
1720+
1721+ req.wlan_idx = cpu_to_le16(wcid->idx);
1722+
1723+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1724+ &req, sizeof(req), false);
1725+}
1726+
1727+static int
1728+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1729+{
1730+ struct mt7915_dev *dev = phy->dev;
1731+ struct mt7915_tm_cmd req = {
1732+ .testmode_en = 1,
1733+ .param_idx = MCU_ATE_SET_MU_RX_AID,
developereb6a0182022-12-12 18:53:32 +08001734+ .param.rx_aid.band = cpu_to_le32(phy->mt76->band_idx),
developer4c6b6002022-05-30 16:36:44 +08001735+ .param.rx_aid.aid = cpu_to_le16(aid),
1736+ };
1737+
1738+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1739+ sizeof(req), false);
1740+}
1741+
1742 static void
1743 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1744 {
1745+ struct mt76_testmode_data *td = &phy->mt76->test;
1746+
1747+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1748 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1749
1750 if (en) {
1751- mt7915_tm_update_channel(phy);
1752+ if (!phy->test.bf_en)
1753+ mt7915_tm_update_channel(phy);
1754+ if (td->aid)
1755+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1756
1757 /* read-clear */
1758 mt7915_tm_get_rx_stats(phy, true);
developereb6a0182022-12-12 18:53:32 +08001759@@ -576,9 +1558,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001760 /* clear fw count */
1761 mt7915_tm_set_phy_count(phy, 0);
1762 mt7915_tm_set_phy_count(phy, 1);
1763-
1764- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1765 }
1766+
1767+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1768+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1769+
1770+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1771 }
1772
1773 static int
developereb6a0182022-12-12 18:53:32 +08001774@@ -617,34 +1602,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001775 tx_cont->tx_ant = td->tx_antenna_mask;
developereb6a0182022-12-12 18:53:32 +08001776 tx_cont->band = band;
developer144824b2022-11-25 21:27:43 +08001777
developer4c6b6002022-05-30 16:36:44 +08001778- switch (chandef->width) {
1779- case NL80211_CHAN_WIDTH_40:
1780- tx_cont->bw = CMD_CBW_40MHZ;
1781- break;
1782- case NL80211_CHAN_WIDTH_80:
1783- tx_cont->bw = CMD_CBW_80MHZ;
1784- break;
1785- case NL80211_CHAN_WIDTH_80P80:
1786- tx_cont->bw = CMD_CBW_8080MHZ;
1787- break;
1788- case NL80211_CHAN_WIDTH_160:
1789- tx_cont->bw = CMD_CBW_160MHZ;
1790- break;
1791- case NL80211_CHAN_WIDTH_5:
1792- tx_cont->bw = CMD_CBW_5MHZ;
1793- break;
1794- case NL80211_CHAN_WIDTH_10:
1795- tx_cont->bw = CMD_CBW_10MHZ;
1796- break;
1797- case NL80211_CHAN_WIDTH_20:
1798- tx_cont->bw = CMD_CBW_20MHZ;
1799- break;
1800- case NL80211_CHAN_WIDTH_20_NOHT:
1801- tx_cont->bw = CMD_CBW_20MHZ;
1802- break;
1803- default:
1804- return -EINVAL;
1805- }
1806+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1807
1808 if (!en) {
developereb6a0182022-12-12 18:53:32 +08001809 req.op.rf.param.func_data = cpu_to_le32(band);
1810@@ -728,6 +1686,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer4c6b6002022-05-30 16:36:44 +08001811 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1812 if (changed & BIT(TM_CHANGED_TXPOWER))
1813 mt7915_tm_set_tx_power(phy);
1814+ if (changed & BIT(TM_CHANGED_AID))
1815+ mt7915_tm_set_entry(phy);
1816+ if (changed & BIT(TM_CHANGED_CFG))
1817+ mt7915_tm_set_cfg(phy);
1818+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1819+ mt7915_tm_set_txbf(phy);
1820 }
1821
1822 static int
developereb6a0182022-12-12 18:53:32 +08001823@@ -807,6 +1771,7 @@ static int
developer4c6b6002022-05-30 16:36:44 +08001824 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1825 {
1826 struct mt7915_phy *phy = mphy->priv;
1827+ struct mt7915_dev *dev = phy->dev;
1828 void *rx, *rssi;
1829 int i;
1830
developereb6a0182022-12-12 18:53:32 +08001831@@ -852,11 +1817,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer4c6b6002022-05-30 16:36:44 +08001832
1833 nla_nest_end(msg, rx);
1834
1835+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1836+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1837+
1838 return mt7915_tm_get_rx_stats(phy, false);
1839 }
1840
1841+static int
1842+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1843+{
1844+ struct mt7915_mcu_eeprom_info req = {};
1845+ u8 *eeprom = dev->mt76.eeprom.data;
1846+ int i, ret = -EINVAL;
1847+
1848+ /* avoid corrupting the chip id stored in efuse */
1849+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1850+ goto out;
1851+
1852+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1853+ req.addr = cpu_to_le32(i);
1854+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1855+
1856+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1857+ &req, sizeof(req), true);
1858+ if (ret)
1859+ return ret;
1860+ }
1861+
1862+out:
1863+ return ret;
1864+}
1865+
1866+static int
1867+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1868+{
1869+ struct mt7915_phy *phy = mphy->priv;
1870+ struct mt7915_dev *dev = phy->dev;
1871+ u8 *eeprom = dev->mt76.eeprom.data;
1872+ int ret = 0;
1873+
1874+ if (offset >= mt7915_eeprom_size(dev))
1875+ return -EINVAL;
1876+
1877+ switch (action) {
1878+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1879+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1880+ break;
1881+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1882+ ret = mt7915_mcu_set_eeprom(dev, true);
1883+ break;
1884+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1885+ ret = mt7915_tm_write_back_to_efuse(dev);
1886+ break;
1887+ default:
1888+ break;
1889+ }
1890+
1891+ return ret;
1892+}
1893+
1894 const struct mt76_testmode_ops mt7915_testmode_ops = {
1895 .set_state = mt7915_tm_set_state,
1896 .set_params = mt7915_tm_set_params,
1897 .dump_stats = mt7915_tm_dump_stats,
1898+ .set_eeprom = mt7915_tm_set_eeprom,
1899 };
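Note: in mt7915_tm_set_tx_frames() above, duty_cycle is a percentage while tx_time and tx_ipg are in microseconds; whichever of the two time parameters is missing is derived from the other, and tx_queued_limit caps in-flight frames to roughly half of what fits into the MT76_TM_TIMEOUT (10 s) window at that ipg. A minimal stand-alone sketch of the same arithmetic, with made-up input values, purely for illustration:

/* sketch only - mirrors the derivation in mt7915_tm_set_tx_frames() */
#include <stdio.h>

int main(void)
{
	unsigned int duty_cycle = 40;	/* percent */
	unsigned int tx_time = 200;	/* us */
	unsigned int ipg = 0;		/* us, to be derived */

	if (duty_cycle && tx_time && !ipg) {
		ipg = tx_time * 100 / duty_cycle - tx_time;	/* 300 us */
	} else if (duty_cycle && !tx_time && ipg) {
		if (duty_cycle < 100)
			tx_time = duty_cycle * ipg / (100 - duty_cycle);
	}

	if (ipg)	/* MT76_TM_TIMEOUT is 10 seconds */
		printf("ipg=%u us, tx_queued_limit=%u\n",
		       ipg, 10 * 1000000 / ipg / 2);	/* 16666 */
	return 0;
}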
1900diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developer81ca9d62022-10-14 11:23:22 +08001901index a1c54c89..01b08e9e 100644
developer4c6b6002022-05-30 16:36:44 +08001902--- a/mt7915/testmode.h
1903+++ b/mt7915/testmode.h
1904@@ -4,6 +4,8 @@
1905 #ifndef __MT7915_TESTMODE_H
1906 #define __MT7915_TESTMODE_H
1907
1908+#include "mcu.h"
1909+
1910 struct mt7915_tm_trx {
1911 u8 type;
1912 u8 enable;
1913@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1914 u8 _rsv[2];
1915 };
1916
1917+struct mt7915_tm_mu_rx_aid {
1918+ __le32 band;
1919+ __le16 aid;
1920+};
1921+
1922 struct mt7915_tm_cmd {
1923 u8 testmode_en;
1924 u8 param_idx;
1925@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1926 struct mt7915_tm_slot_time slot;
1927 struct mt7915_tm_clean_txq clean;
1928 struct mt7915_tm_cfg cfg;
1929+ struct mt7915_tm_mu_rx_aid rx_aid;
1930 u8 test[72];
1931 } param;
1932 } __packed;
1933@@ -109,6 +117,16 @@ enum {
1934 TAM_ARB_OP_MODE_FORCE_SU = 5,
1935 };
1936
1937+enum {
1938+ TM_CBW_20MHZ,
1939+ TM_CBW_40MHZ,
1940+ TM_CBW_80MHZ,
1941+ TM_CBW_10MHZ,
1942+ TM_CBW_5MHZ,
1943+ TM_CBW_160MHZ,
1944+ TM_CBW_8080MHZ,
1945+};
1946+
1947 struct mt7915_tm_rx_stat_band {
1948 u8 category;
1949
1950@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1951 __le16 mdrdy_cnt_ofdm;
1952 };
1953
1954+struct mt7915_tm_muru_comm {
1955+ u8 ppdu_format;
1956+ u8 sch_type;
1957+ u8 band;
1958+ u8 wmm_idx;
1959+ u8 spe_idx;
1960+ u8 proc_type;
1961+};
1962+
1963+struct mt7915_tm_muru_dl_usr {
1964+ __le16 wlan_idx;
1965+ u8 ru_alloc_seg;
1966+ u8 ru_idx;
1967+ u8 ldpc;
1968+ u8 nss;
1969+ u8 mcs;
1970+ u8 mu_group_idx;
1971+ u8 vht_groud_id;
1972+ u8 vht_up;
1973+ u8 he_start_stream;
1974+ u8 he_mu_spatial;
1975+ u8 ack_policy;
1976+ __le16 tx_power_alpha;
1977+};
1978+
1979+struct mt7915_tm_muru_dl {
1980+ u8 user_num;
1981+ u8 tx_mode;
1982+ u8 bw;
1983+ u8 gi;
1984+ u8 ltf;
1985+ /* sigB */
1986+ u8 mcs;
1987+ u8 dcm;
1988+ u8 cmprs;
1989+
1990+ u8 tx_power;
1991+ u8 ru[8];
1992+ u8 c26[2];
1993+ u8 ack_policy;
1994+
1995+ struct mt7915_tm_muru_dl_usr usr[16];
1996+};
1997+
1998+struct mt7915_tm_muru_ul_usr {
1999+ __le16 wlan_idx;
2000+ u8 ru_alloc;
2001+ u8 ru_idx;
2002+ u8 ldpc;
2003+ u8 nss;
2004+ u8 mcs;
2005+ u8 target_rssi;
2006+ __le32 trig_pkt_size;
2007+};
2008+
2009+struct mt7915_tm_muru_ul {
2010+ u8 user_num;
2011+
2012+ /* UL TX */
2013+ u8 trig_type;
2014+ __le16 trig_cnt;
2015+ __le16 trig_intv;
2016+ u8 bw;
2017+ u8 gi_ltf;
2018+ __le16 ul_len;
2019+ u8 pad;
2020+ u8 trig_ta[ETH_ALEN];
2021+ u8 ru[8];
2022+ u8 c26[2];
2023+
2024+ struct mt7915_tm_muru_ul_usr usr[16];
2025+ /* HE TB RX Debug */
2026+ __le32 rx_hetb_nonsf_en_bitmap;
2027+ __le32 rx_hetb_cfg[2];
2028+
2029+ /* DL TX */
2030+ u8 ba_type;
2031+};
2032+
2033+struct mt7915_tm_muru {
2034+ __le32 cfg_comm;
2035+ __le32 cfg_dl;
2036+ __le32 cfg_ul;
2037+
2038+ struct mt7915_tm_muru_comm comm;
2039+ struct mt7915_tm_muru_dl dl;
2040+ struct mt7915_tm_muru_ul ul;
2041+};
2042+
2043+#define MURU_PPDU_HE_MU BIT(3)
2044+
2045+/* Common Config */
2046+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2047+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2048+/* #define MURU_COMM_BAND BIT(2) */
2049+/* #define MURU_COMM_WMM BIT(3) */
2050+/* #define MURU_COMM_SPE_IDX BIT(4) */
2051+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2052+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2053+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2054+/* DL Config */
2055+#define MURU_DL_BW BIT(0)
2056+#define MURU_DL_GI BIT(1)
2057+#define MURU_DL_TX_MODE BIT(2)
2058+#define MURU_DL_TONE_PLAN BIT(3)
2059+#define MURU_DL_USER_CNT BIT(4)
2060+#define MURU_DL_LTF BIT(5)
2061+#define MURU_DL_SIGB_MCS BIT(6)
2062+#define MURU_DL_SIGB_DCM BIT(7)
2063+#define MURU_DL_SIGB_CMPRS BIT(8)
2064+#define MURU_DL_ACK_POLICY BIT(9)
2065+#define MURU_DL_TXPOWER BIT(10)
2066+/* DL Per User Config */
2067+#define MURU_DL_USER_WLAN_ID BIT(16)
2068+#define MURU_DL_USER_COD BIT(17)
2069+#define MURU_DL_USER_MCS BIT(18)
2070+#define MURU_DL_USER_NSS BIT(19)
2071+#define MURU_DL_USER_RU_ALLOC BIT(20)
2072+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2073+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2074+#define MURU_DL_USER_ACK_POLICY BIT(23)
2075+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2076+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2077+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2078+
2079+#define MAX_PHASE_GROUP_NUM 9
2080+
2081+struct mt7915_tm_txbf_phase {
2082+ u8 status;
2083+ struct {
2084+ u8 r0_uh;
2085+ u8 r0_h;
2086+ u8 r0_m;
2087+ u8 r0_l;
2088+ u8 r0_ul;
2089+ u8 r1_uh;
2090+ u8 r1_h;
2091+ u8 r1_m;
2092+ u8 r1_l;
2093+ u8 r1_ul;
2094+ u8 r2_uh;
2095+ u8 r2_h;
2096+ u8 r2_m;
2097+ u8 r2_l;
2098+ u8 r2_ul;
2099+ u8 r3_uh;
2100+ u8 r3_h;
2101+ u8 r3_m;
2102+ u8 r3_l;
2103+ u8 r3_ul;
2104+ u8 r2_uh_sx2;
2105+ u8 r2_h_sx2;
2106+ u8 r2_m_sx2;
2107+ u8 r2_l_sx2;
2108+ u8 r2_ul_sx2;
2109+ u8 r3_uh_sx2;
2110+ u8 r3_h_sx2;
2111+ u8 r3_m_sx2;
2112+ u8 r3_l_sx2;
2113+ u8 r3_ul_sx2;
2114+ u8 m_t0_h;
2115+ u8 m_t1_h;
2116+ u8 m_t2_h;
2117+ u8 m_t2_h_sx2;
2118+ u8 r0_reserved;
2119+ u8 r1_reserved;
2120+ u8 r2_reserved;
2121+ u8 r3_reserved;
2122+ u8 r2_sx2_reserved;
2123+ u8 r3_sx2_reserved;
2124+ } phase;
2125+};
2126+
2127+struct mt7915_tm_pfmu_tag1 {
2128+ __le32 pfmu_idx:10;
2129+ __le32 ebf:1;
2130+ __le32 data_bw:2;
2131+ __le32 lm:2;
2132+ __le32 is_mu:1;
2133+ __le32 nr:3, nc:3;
2134+ __le32 codebook:2;
2135+ __le32 ngroup:2;
2136+ __le32 _rsv:2;
2137+ __le32 invalid_prof:1;
2138+ __le32 rmsd:3;
2139+
2140+ __le32 col_id1:6, row_id1:10;
2141+ __le32 col_id2:6, row_id2:10;
2142+ __le32 col_id3:6, row_id3:10;
2143+ __le32 col_id4:6, row_id4:10;
2144+
2145+ __le32 ru_start_id:7;
2146+ __le32 _rsv1:1;
2147+ __le32 ru_end_id:7;
2148+ __le32 _rsv2:1;
2149+ __le32 mob_cal_en:1;
2150+ __le32 _rsv3:15;
2151+
2152+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2153+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2154+
2155+ __le32 _rsv4;
2156+} __packed;
2157+
2158+struct mt7915_tm_pfmu_tag2 {
2159+ __le32 smart_ant:24;
2160+ __le32 se_idx:5;
2161+ __le32 _rsv:3;
2162+
2163+ __le32 _rsv1:8;
2164+ __le32 rmsd_thres:3;
2165+ __le32 _rsv2:5;
2166+ __le32 ibf_timeout:8;
2167+ __le32 _rsv3:8;
2168+
2169+ __le32 _rsv4:16;
2170+ __le32 ibf_data_bw:2;
2171+ __le32 ibf_nc:3;
2172+ __le32 ibf_nr:3;
2173+ __le32 ibf_ru:8;
2174+
2175+ __le32 mob_delta_t:8;
2176+ __le32 mob_lq_result:7;
2177+ __le32 _rsv5:1;
2178+ __le32 _rsv6:16;
2179+
2180+ __le32 _rsv7;
2181+} __packed;
2182+
2183+struct mt7915_tm_pfmu_tag {
2184+ struct mt7915_tm_pfmu_tag1 t1;
2185+ struct mt7915_tm_pfmu_tag2 t2;
2186+};
2187+
2188+struct mt7915_tm_pfmu_data {
2189+ __le16 subc_idx;
2190+ __le16 phi11;
2191+ __le16 phi21;
2192+ __le16 phi31;
2193+};
2194+
2195+struct mt7915_tm_ibf_cal_info {
2196+ u8 format_id;
2197+ u8 group_l_m_n;
2198+ u8 group;
2199+ bool sx2;
2200+ u8 status;
2201+ u8 cal_type;
2202+ u8 _rsv[2];
2203+ u8 buf[1000];
2204+} __packed;
2205+
2206+enum {
2207+ IBF_PHASE_CAL_UNSPEC,
2208+ IBF_PHASE_CAL_NORMAL,
2209+ IBF_PHASE_CAL_VERIFY,
2210+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2211+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2212+};
2213+
2214 #endif
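Note: MURU_DL_SET above groups the DL config flags that the testmode MU setup presumably writes as its cfg_dl validity mask: the common fields MURU_DL_BW..MURU_DL_SIGB_DCM (GENMASK(7, 0)), the per-user fields MURU_DL_USER_WLAN_ID..MURU_DL_USER_RU_ALLOC (GENMASK(20, 16)) and MURU_DL_USER_PWR_ALPHA (BIT(25)). A small self-contained check of that expansion, with BIT()/GENMASK() re-implemented locally just for the sketch:

#include <assert.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	((~0U >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	unsigned int dl_set = GENMASK(7, 0) | GENMASK(20, 16) | BIT(25);
	unsigned int flags = BIT(0) | BIT(1) | BIT(2) | BIT(3) |  /* BW, GI, TX_MODE, TONE_PLAN */
			     BIT(4) | BIT(5) | BIT(6) | BIT(7) |  /* USER_CNT, LTF, SIGB_MCS, SIGB_DCM */
			     BIT(16) | BIT(17) | BIT(18) |        /* USER_WLAN_ID, USER_COD, USER_MCS */
			     BIT(19) | BIT(20) | BIT(25);         /* USER_NSS, USER_RU_ALLOC, USER_PWR_ALPHA */

	assert(dl_set == flags);
	printf("MURU_DL_SET = 0x%08x\n", dl_set);	/* 0x021f00ff */
	return 0;
}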
2215diff --git a/testmode.c b/testmode.c
developer81ca9d62022-10-14 11:23:22 +08002216index 1d0d5d30..7a9ed543 100644
developer4c6b6002022-05-30 16:36:44 +08002217--- a/testmode.c
2218+++ b/testmode.c
developere9954402022-07-12 10:15:11 -07002219@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer4c6b6002022-05-30 16:36:44 +08002220 };
2221 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2222
2223-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2224+static void
2225+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf1b69ea2022-07-04 10:54:39 +08002226+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2227+ u16 limit)
developer4c6b6002022-05-30 16:36:44 +08002228 {
2229 struct mt76_testmode_data *td = &phy->test;
2230 struct mt76_dev *dev = phy->dev;
2231- struct mt76_wcid *wcid = &dev->global_wcid;
2232- struct sk_buff *skb = td->tx_skb;
2233- struct mt76_queue *q;
2234- u16 tx_queued_limit;
2235- int qid;
2236-
2237- if (!skb || !td->tx_pending)
2238- return;
2239+ u16 count = limit;
2240
2241- qid = skb_get_queue_mapping(skb);
2242- q = phy->q_tx[qid];
2243-
2244- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2245-
2246- spin_lock_bh(&q->lock);
2247-
2248- while (td->tx_pending > 0 &&
2249- td->tx_queued - td->tx_done < tx_queued_limit &&
2250+ while (td->tx_pending > 0 && count &&
2251 q->queued < q->ndesc / 2) {
2252 int ret;
2253
developere9954402022-07-12 10:15:11 -07002254@@ -57,13 +45,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002255 if (ret < 0)
2256 break;
2257
2258+ count--;
2259 td->tx_pending--;
2260 td->tx_queued++;
2261+
2262+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2263+ if (td->tx_queued - td->tx_done >= limit)
2264+ break;
2265 }
2266
2267 dev->queue_ops->kick(dev, q);
2268+}
2269+
2270+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2271+{
2272+ struct mt76_testmode_data *td = &phy->test;
2273+ struct mt76_testmode_entry_data *ed;
2274+ struct mt76_queue *q;
2275+ int qid;
2276+ u16 tx_queued_limit;
2277+ u32 remain;
2278+ bool is_mu;
2279+
2280+ if (!td->tx_pending)
2281+ return;
2282+
2283+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2284+ tx_queued_limit = 100;
2285+
2286+ if (!td->aid) {
2287+ qid = skb_get_queue_mapping(td->tx_skb);
2288+ q = phy->q_tx[qid];
2289+ spin_lock_bh(&q->lock);
2290+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developerf1b69ea2022-07-04 10:54:39 +08002291+ td->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002292+ spin_unlock_bh(&q->lock);
2293+
2294+ return;
2295+ }
2296+
2297+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2298+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2299+ qid = skb_get_queue_mapping(ed->tx_skb);
2300+ q = phy->q_tx[qid];
2301+
2302+ spin_lock_bh(&q->lock);
2303+
2304+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2305+ if (remain < tx_queued_limit)
2306+ tx_queued_limit = remain;
2307+
developerf1b69ea2022-07-04 10:54:39 +08002308+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002309+
2310+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2311+ td->cur_entry = list_next_entry(td->cur_entry, list);
2312
2313 spin_unlock_bh(&q->lock);
2314+
2315+ if (is_mu && td->tx_pending)
2316+ mt76_worker_schedule(&phy->dev->tx_worker);
2317 }
2318
2319 static u32
developere9954402022-07-12 10:15:11 -07002320@@ -89,15 +129,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer4c6b6002022-05-30 16:36:44 +08002321 }
2322
2323 static void
2324-mt76_testmode_free_skb(struct mt76_phy *phy)
2325+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2326+{
2327+ if (!(*tx_skb))
2328+ return;
2329+
2330+ dev_kfree_skb(*tx_skb);
2331+ *tx_skb = NULL;
2332+}
2333+
2334+static void
2335+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2336 {
2337 struct mt76_testmode_data *td = &phy->test;
2338+ struct mt76_testmode_entry_data *ed = &td->ed;
2339+ struct mt76_wcid *wcid;
2340+
2341+ mt76_testmode_free_skb(&ed->tx_skb);
2342
2343- dev_kfree_skb(td->tx_skb);
2344- td->tx_skb = NULL;
2345+ mt76_tm_for_each_entry(phy, wcid, ed)
2346+ mt76_testmode_free_skb(&ed->tx_skb);
2347 }
2348
2349-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2350+static int
2351+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2352+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2353 {
2354 #define MT_TXP_MAX_LEN 4095
2355 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developer1d9fede2022-08-29 15:24:07 +08002356@@ -118,7 +174,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002357 nfrags = len / MT_TXP_MAX_LEN;
2358 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2359
2360- if (len > IEEE80211_MAX_FRAME_LEN)
2361+ if (len > IEEE80211_MAX_FRAME_LEN ||
2362+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2363 fc |= IEEE80211_STYPE_QOS_DATA;
2364
2365 head = alloc_skb(head_len, GFP_KERNEL);
developer1d9fede2022-08-29 15:24:07 +08002366@@ -127,9 +184,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002367
developere9954402022-07-12 10:15:11 -07002368 hdr = __skb_put_zero(head, sizeof(*hdr));
developer4c6b6002022-05-30 16:36:44 +08002369 hdr->frame_control = cpu_to_le16(fc);
2370- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2371- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2372- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2373+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2374+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2375+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2376 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developere9954402022-07-12 10:15:11 -07002377 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2378 head_len - sizeof(*hdr));
developer1d9fede2022-08-29 15:24:07 +08002379@@ -153,7 +210,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002380
2381 frag = alloc_skb(frag_len, GFP_KERNEL);
2382 if (!frag) {
2383- mt76_testmode_free_skb(phy);
2384+ mt76_testmode_free_skb(tx_skb);
2385 dev_kfree_skb(head);
2386 return -ENOMEM;
2387 }
developer1d9fede2022-08-29 15:24:07 +08002388@@ -166,15 +223,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002389 frag_tail = &(*frag_tail)->next;
2390 }
2391
2392- mt76_testmode_free_skb(phy);
2393- td->tx_skb = head;
2394+ mt76_testmode_free_skb(tx_skb);
2395+ *tx_skb = head;
2396
2397 return 0;
2398 }
2399-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2400
2401-static int
2402-mt76_testmode_tx_init(struct mt76_phy *phy)
2403+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2404+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2405 {
2406 struct mt76_testmode_data *td = &phy->test;
2407 struct ieee80211_tx_info *info;
developer1d9fede2022-08-29 15:24:07 +08002408@@ -182,7 +238,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002409 u8 max_nss = hweight8(phy->antenna_mask);
2410 int ret;
2411
2412- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2413+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2414 if (ret)
2415 return ret;
2416
developer1d9fede2022-08-29 15:24:07 +08002417@@ -192,7 +248,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002418 if (td->tx_antenna_mask)
2419 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2420
2421- info = IEEE80211_SKB_CB(td->tx_skb);
2422+ info = IEEE80211_SKB_CB(*tx_skb);
2423 rate = &info->control.rates[0];
2424 rate->count = 1;
2425 rate->idx = td->tx_rate_idx;
developer1d9fede2022-08-29 15:24:07 +08002426@@ -264,6 +320,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002427 out:
2428 return 0;
2429 }
2430+EXPORT_SYMBOL(mt76_testmode_init_skb);
2431+
2432+static int
2433+mt76_testmode_tx_init(struct mt76_phy *phy)
2434+{
2435+ struct mt76_testmode_entry_data *ed;
2436+ struct mt76_wcid *wcid;
2437+
2438+ mt76_tm_for_each_entry(phy, wcid, ed) {
2439+ int ret;
2440+
2441+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2442+ &ed->tx_skb, ed->addr);
2443+ if (ret)
2444+ return ret;
2445+ }
2446+
2447+ return 0;
2448+}
2449
2450 static void
2451 mt76_testmode_tx_start(struct mt76_phy *phy)
developer1d9fede2022-08-29 15:24:07 +08002452@@ -274,6 +349,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002453 td->tx_queued = 0;
2454 td->tx_done = 0;
2455 td->tx_pending = td->tx_count;
2456+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2457+ td->tx_pending = 1;
2458+ if (td->entry_num) {
2459+ td->tx_pending *= td->entry_num;
2460+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2461+ struct mt76_wcid, list);
2462+ }
2463+
2464 mt76_worker_schedule(&dev->tx_worker);
2465 }
2466
developer1d9fede2022-08-29 15:24:07 +08002467@@ -292,7 +375,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002468 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2469 MT76_TM_TIMEOUT * HZ);
2470
2471- mt76_testmode_free_skb(phy);
2472+ mt76_testmode_free_skb_all(phy);
2473 }
2474
2475 static inline void
developer1d9fede2022-08-29 15:24:07 +08002476@@ -323,6 +406,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002477 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2478 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2479 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2480+
2481+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2482 }
2483
2484 static int
developer1d9fede2022-08-29 15:24:07 +08002485@@ -332,8 +417,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer4c6b6002022-05-30 16:36:44 +08002486 struct mt76_dev *dev = phy->dev;
2487 int err;
2488
2489- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2490+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2491+ /* MU needs to clean hwq for free done event */
2492+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2493+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2494 mt76_testmode_tx_stop(phy);
2495+ }
2496
2497 if (state == MT76_TM_STATE_TX_FRAMES) {
2498 err = mt76_testmode_tx_init(phy);
developer1d9fede2022-08-29 15:24:07 +08002499@@ -403,6 +492,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer4c6b6002022-05-30 16:36:44 +08002500 return 0;
2501 }
2502
2503+static int
2504+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2505+{
2506+ struct mt76_dev *dev = phy->dev;
2507+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2508+ u32 offset = 0;
2509+ int err = -EINVAL;
2510+
2511+ if (!dev->test_ops->set_eeprom)
2512+ return -EOPNOTSUPP;
2513+
2514+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2515+ 0, MT76_TM_EEPROM_ACTION_MAX))
2516+ goto out;
2517+
2518+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2519+ struct nlattr *cur;
2520+ int rem, idx = 0;
2521+
2522+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2523+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2524+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2525+ goto out;
2526+
2527+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2528+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2529+ goto out;
2530+
2531+ val[idx++] = nla_get_u8(cur);
2532+ }
2533+ }
2534+
2535+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2536+
2537+out:
2538+ return err;
2539+}
2540+
2541 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2542 void *data, int len)
2543 {
developer1d9fede2022-08-29 15:24:07 +08002544@@ -426,6 +553,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002545
2546 mutex_lock(&dev->mutex);
2547
2548+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2549+ err = mt76_testmode_set_eeprom(phy, tb);
2550+ goto out;
2551+ }
2552+
2553 if (tb[MT76_TM_ATTR_RESET]) {
2554 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2555 memset(td, 0, sizeof(*td));
developer5ce5ea42022-08-31 14:12:29 +08002556@@ -452,7 +584,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002557 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2558 &td->tx_duty_cycle, 0, 99) ||
2559 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2560- &td->tx_power_control, 0, 1))
2561+ &td->tx_power_control, 0, 1) ||
2562+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2563+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2564+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2565 goto out;
2566
2567 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developer5ce5ea42022-08-31 14:12:29 +08002568@@ -484,8 +619,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002569
2570 if (tb[MT76_TM_ATTR_TX_POWER]) {
2571 struct nlattr *cur;
2572- int idx = 0;
2573- int rem;
2574+ int rem, idx = 0;
2575
2576 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2577 if (nla_len(cur) != 1 ||
developer5ce5ea42022-08-31 14:12:29 +08002578@@ -505,11 +639,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002579 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2580 goto out;
2581
2582- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2583+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2584+ }
2585+ }
2586+
2587+ if (tb[MT76_TM_ATTR_CFG]) {
2588+ struct nlattr *cur;
2589+ int rem, idx = 0;
2590+
2591+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2592+ if (nla_len(cur) != 1 || idx >= 2)
2593+ goto out;
2594+
2595+ if (idx == 0)
2596+ td->cfg.type = nla_get_u8(cur);
2597+ else
2598+ td->cfg.enable = nla_get_u8(cur);
2599 idx++;
2600 }
2601 }
2602
2603+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2604+ struct nlattr *cur;
2605+ int rem, idx = 0;
2606+
2607+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2608+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2609+ 0, MT76_TM_TXBF_ACT_MAX))
2610+ goto out;
2611+
2612+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2613+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2614+ if (nla_len(cur) != 2 ||
2615+ idx >= ARRAY_SIZE(td->txbf_param))
2616+ goto out;
2617+
2618+ td->txbf_param[idx++] = nla_get_u16(cur);
2619+ }
2620+ }
2621+
2622 if (dev->test_ops->set_params) {
2623 err = dev->test_ops->set_params(phy, tb, state);
2624 if (err)
developer5ce5ea42022-08-31 14:12:29 +08002625@@ -574,6 +742,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002626 struct mt76_phy *phy = hw->priv;
2627 struct mt76_dev *dev = phy->dev;
2628 struct mt76_testmode_data *td = &phy->test;
2629+ struct mt76_testmode_entry_data *ed = &td->ed;
2630 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2631 int err = 0;
2632 void *a;
developer5ce5ea42022-08-31 14:12:29 +08002633@@ -606,6 +775,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002634 goto out;
2635 }
2636
2637+ if (tb[MT76_TM_ATTR_AID]) {
2638+ struct mt76_wcid *wcid;
2639+ u8 aid;
2640+
2641+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2642+ if (err)
2643+ goto out;
2644+
2645+ mt76_tm_for_each_entry(phy, wcid, ed)
2646+ if (ed->aid == aid)
2647+ ed = mt76_testmode_entry_data(phy, wcid);
2648+ }
2649+
2650 mt76_testmode_init_defaults(phy);
2651
2652 err = -EMSGSIZE;
developer5ce5ea42022-08-31 14:12:29 +08002653@@ -618,12 +800,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002654 goto out;
2655
2656 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2657- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2658 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2659- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2660- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2661 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2662- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2663 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2664 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2665 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developer5ce5ea42022-08-31 14:12:29 +08002666@@ -643,6 +821,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002667 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2668 goto out;
2669
2670+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2671+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2672+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2673+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2674+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2675+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2676+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2677+ goto out;
2678+
2679 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2680 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2681 if (!a)
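Note: with the per-AID entry list, mt76_testmode_tx_pending() above queues tx_count frames for the current entry before advancing cur_entry to the next one (td->tx_pending starts at entry_num * tx_count); in HE MU mode it queues a single frame per pass and reschedules the tx worker instead. A toy model of that round robin, simplified to one frame per loop iteration and using hypothetical counts, just to make the modulo logic explicit:

/* toy model only - not driver code */
#include <stdio.h>

int main(void)
{
	unsigned int entry_num = 2, tx_count = 3;
	unsigned int tx_pending = entry_num * tx_count;
	unsigned int cur = 0;

	while (tx_pending) {
		printf("queue frame for entry %u\n", cur);
		tx_pending--;

		/* same condition as the driver: advance once a full
		 * tx_count batch has gone to the current entry
		 */
		if (tx_pending % tx_count == 0)
			cur = (cur + 1) % entry_num;
	}
	return 0;
}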
2682diff --git a/testmode.h b/testmode.h
developer81ca9d62022-10-14 11:23:22 +08002683index 89613266..57949f2b 100644
developer4c6b6002022-05-30 16:36:44 +08002684--- a/testmode.h
2685+++ b/testmode.h
2686@@ -6,6 +6,8 @@
2687 #define __MT76_TESTMODE_H
2688
2689 #define MT76_TM_TIMEOUT 10
2690+#define MT76_TM_MAX_ENTRY_NUM 16
2691+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2692
2693 /**
2694 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2695@@ -47,6 +49,15 @@
2696 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2697 *
2698 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2699+ *
2700+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2701+ * (u8, see &enum mt76_testmode_eeprom_action)
2702+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2703+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2704+ * (nested, u8 attrs)
2705+ *
2706+ * @MT76_TM_ATTR_CFG: rf feature configuration (nested, see &enum mt76_testmode_cfg)
2707+ *
2708 */
2709 enum mt76_testmode_attr {
2710 MT76_TM_ATTR_UNSPEC,
2711@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2712 MT76_TM_ATTR_DRV_DATA,
2713
2714 MT76_TM_ATTR_MAC_ADDRS,
2715+ MT76_TM_ATTR_AID,
2716+ MT76_TM_ATTR_RU_ALLOC,
2717+ MT76_TM_ATTR_RU_IDX,
2718+
2719+ MT76_TM_ATTR_EEPROM_ACTION,
2720+ MT76_TM_ATTR_EEPROM_OFFSET,
2721+ MT76_TM_ATTR_EEPROM_VAL,
2722+
2723+ MT76_TM_ATTR_CFG,
2724+ MT76_TM_ATTR_TXBF_ACT,
2725+ MT76_TM_ATTR_TXBF_PARAM,
2726
2727 /* keep last */
2728 NUM_MT76_TM_ATTRS,
2729@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2730
2731 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2732
2733+/**
2734+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2735+ *
2736+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values to specific
2737+ * eeprom data block
2738+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2739+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2740+ */
2741+enum mt76_testmode_eeprom_action {
2742+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2743+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2744+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2745+
2746+ /* keep last */
2747+ NUM_MT76_TM_EEPROM_ACTION,
2748+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2749+};
2750+
2751+/**
2752+ * enum mt76_testmode_cfg - rf feature configuration type
2753+ *
2754+ * @MT76_TM_CFG_TSSI: TSSI (transmit signal strength indication) calibration
2755+ * @MT76_TM_CFG_DPD: DPD (digital pre-distortion) calibration
2756+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate tx power offset
2757+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
2758+ */
2759+enum mt76_testmode_cfg {
2760+ MT76_TM_CFG_TSSI,
2761+ MT76_TM_CFG_DPD,
2762+ MT76_TM_CFG_RATE_POWER_OFFSET,
2763+ MT76_TM_CFG_THERMAL_COMP,
2764+
2765+ /* keep last */
2766+ NUM_MT76_TM_CFG,
2767+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2768+};
2769+
2770+enum mt76_testmode_txbf_act {
2771+ MT76_TM_TXBF_ACT_INIT,
2772+ MT76_TM_TXBF_ACT_UPDATE_CH,
2773+ MT76_TM_TXBF_ACT_PHASE_COMP,
2774+ MT76_TM_TXBF_ACT_TX_PREP,
2775+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2776+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2777+ MT76_TM_TXBF_ACT_PHASE_CAL,
2778+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2779+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2780+
2781+ /* keep last */
2782+ NUM_MT76_TM_TXBF_ACT,
2783+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2784+};
2785+
2786 #endif
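Note: mt76_testmode_set_eeprom() in testmode.c expects MT76_TM_ATTR_EEPROM_ACTION as a u8, MT76_TM_ATTR_EEPROM_OFFSET as a u32 aligned to MT76_TM_EEPROM_BLOCK_SIZE (16 bytes) and MT76_TM_ATTR_EEPROM_VAL as a nest of up to 16 u8 attributes. A rough user-space libnl sketch of filling those attributes into an already-prepared testmode message; the helper name and the surrounding nl80211 message setup are assumptions, not part of this patch:

#include <stdint.h>
#include <netlink/attr.h>
#include <netlink/msg.h>
#include "testmode.h"	/* MT76_TM_ATTR_* / MT76_TM_EEPROM_* */

/* write one 16-byte block update; the caller has already opened the
 * NL80211_ATTR_TESTDATA nest and closes it after this returns
 */
static int tm_put_eeprom_block(struct nl_msg *msg, uint32_t offset,
			       const uint8_t *block)
{
	struct nlattr *vals;
	int i;

	if (nla_put_u8(msg, MT76_TM_ATTR_EEPROM_ACTION,
		       MT76_TM_EEPROM_ACTION_UPDATE_DATA) ||
	    nla_put_u32(msg, MT76_TM_ATTR_EEPROM_OFFSET, offset))
		return -1;

	vals = nla_nest_start(msg, MT76_TM_ATTR_EEPROM_VAL);
	if (!vals)
		return -1;

	/* one u8 attribute per byte of the block */
	for (i = 0; i < MT76_TM_EEPROM_BLOCK_SIZE; i++)
		if (nla_put_u8(msg, i, block[i]))
			return -1;

	nla_nest_end(msg, vals);
	return 0;
}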
2787diff --git a/tools/fields.c b/tools/fields.c
developer81ca9d62022-10-14 11:23:22 +08002788index e3f69089..6e36ab27 100644
developer4c6b6002022-05-30 16:36:44 +08002789--- a/tools/fields.c
2790+++ b/tools/fields.c
2791@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2792 [MT76_TM_STATE_IDLE] = "idle",
2793 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2794 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2795+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2796 };
2797
2798 static const char * const testmode_tx_mode[] = {
2799@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2800 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2801 }
2802
2803+static bool parse_mac(const struct tm_field *field, int idx,
2804+ struct nl_msg *msg, const char *val)
2805+{
2806+#define ETH_ALEN 6
2807+ bool ret = true;
2808+ char *str, *cur, *ap;
2809+ void *a;
2810+
2811+ ap = str = strdup(val);
2812+
2813+ a = nla_nest_start(msg, idx);
2814+
2815+ idx = 0;
2816+ while ((cur = strsep(&ap, ",")) != NULL) {
2817+ unsigned char addr[ETH_ALEN];
2818+ char *val, *tmp = cur;
2819+ int i = 0;
2820+
2821+ while ((val = strsep(&tmp, ":")) != NULL) {
2822+ if (i >= ETH_ALEN)
2823+ break;
2824+
2825+ addr[i++] = strtoul(val, NULL, 16);
2826+ }
2827+
2828+ nla_put(msg, idx, ETH_ALEN, addr);
2829+
2830+ idx++;
2831+ }
2832+
2833+ nla_nest_end(msg, a);
2834+
2835+ free(str);
2836+
2837+ return ret;
2838+}
2839+
2840+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2841+{
2842+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2843+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2844+ unsigned char addr[3][6];
2845+ struct nlattr *cur;
2846+ int idx = 0;
2847+ int rem;
2848+
2849+ nla_for_each_nested(cur, attr, rem) {
2850+ if (nla_len(cur) != 6)
2851+ continue;
2852+ memcpy(addr[idx++], nla_data(cur), 6);
2853+ }
2854+
2855+ printf("" MACSTR "," MACSTR "," MACSTR "",
2856+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2857+
2858+ return;
2859+}
2860
2861 #define FIELD_GENERIC(_field, _name, ...) \
2862 [FIELD_NAME(_field)] = { \
2863@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2864 ##__VA_ARGS__ \
2865 )
2866
2867+#define FIELD_MAC(_field, _name) \
2868+ [FIELD_NAME(_field)] = { \
2869+ .name = _name, \
2870+ .parse = parse_mac, \
2871+ .print = print_mac \
2872+ }
2873+
2874 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2875 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2876 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2877@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2878 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2879 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2880 FIELD(u8, TX_LTF, "tx_ltf"),
2881+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2882+ FIELD(u32, TX_IPG, "tx_ipg"),
2883+ FIELD(u32, TX_TIME, "tx_time"),
2884 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2885 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2886 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2887+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2888 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2889+ FIELD(u8, AID, "aid"),
2890+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2891+ FIELD(u8, RU_IDX, "ru_idx"),
2892+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2893 FIELD_NESTED_RO(STATS, stats, "",
2894 .print_extra = print_extra_stats),
2895 };
2896@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2897 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2898 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2899 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2900+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2901+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2902+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2903 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2904 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2905+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2906 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2907+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2908+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2909+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2910 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2911 };
2912
2913diff --git a/tx.c b/tx.c
developereb6a0182022-12-12 18:53:32 +08002914index 1f309d05..6d55566f 100644
developer4c6b6002022-05-30 16:36:44 +08002915--- a/tx.c
2916+++ b/tx.c
developereb6a0182022-12-12 18:53:32 +08002917@@ -250,8 +250,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
developer4c6b6002022-05-30 16:36:44 +08002918 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2919 struct mt76_phy *phy = hw->priv;
2920
2921- if (skb == phy->test.tx_skb)
2922- phy->test.tx_done++;
2923+ phy->test.tx_done++;
2924 if (phy->test.tx_queued == phy->test.tx_done)
2925 wake_up(&dev->tx_wait);
2926
2927--
developer9851a292022-12-15 17:33:43 +080029282.18.0
developer4c6b6002022-05-30 16:36:44 +08002929