From 969b99141e451f7af21a2e8f5ec051be6deaa026 Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1111/1119] mt76: testmode: additional supports

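Extend mt76 testmode support for the mt7915 family:

- add per-AID testmode entries so that multiple stations can be
  emulated for HE MU (MU-MIMO/OFDMA) TX/RX tests
- add TXBF test actions for iBF/eBF: phase calibration and
  compensation, PFMU tag/profile read and write, and EEPROM write-back
- add ATE commands for TSSI, DPD, rate power offset, thermal
  compensation and MU RX AID
- hook a testmode set_eeprom op and pass flash mode explicitly to
  mt7915_mcu_set_eeprom()
- extend the 5 GHz channel list with test-only channels
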
Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
 drivers/net/wireless/mediatek/mt76/dma.c      |    3 +-
 drivers/net/wireless/mediatek/mt76/mac80211.c |   12 +
 drivers/net/wireless/mediatek/mt76/mt76.h     |  111 +-
 .../wireless/mediatek/mt76/mt76_connac_mcu.c  |    4 +
 .../wireless/mediatek/mt76/mt76_connac_mcu.h  |    2 +
 .../net/wireless/mediatek/mt76/mt7915/init.c  |    2 +-
 .../net/wireless/mediatek/mt76/mt7915/mac.c   |   37 +-
 .../net/wireless/mediatek/mt76/mt7915/main.c  |    2 +-
 .../net/wireless/mediatek/mt76/mt7915/mcu.c   |   10 +-
 .../net/wireless/mediatek/mt76/mt7915/mcu.h   |   28 +-
 .../net/wireless/mediatek/mt76/mt7915/mmio.c  |    2 +
 .../wireless/mediatek/mt76/mt7915/mt7915.h    |   14 +-
 .../net/wireless/mediatek/mt76/mt7915/regs.h  |    3 +
 .../wireless/mediatek/mt76/mt7915/testmode.c  | 1171 +++++++++++++++--
 .../wireless/mediatek/mt76/mt7915/testmode.h  |  278 ++++
 drivers/net/wireless/mediatek/mt76/testmode.c |  275 +++-
 drivers/net/wireless/mediatek/mt76/testmode.h |   75 ++
 .../net/wireless/mediatek/mt76/tools/fields.c |   80 ++
 drivers/net/wireless/mediatek/mt76/tx.c       |    3 +-
 19 files changed, 1963 insertions(+), 149 deletions(-)

29diff --git a/dma.c b/dma.c
developerf1b69ea2022-07-04 10:54:39 +080030index f22273cd..03ee9109 100644
developer4c6b6002022-05-30 16:36:44 +080031--- a/dma.c
32+++ b/dma.c
33@@ -426,8 +426,7 @@ free:
34 if (mt76_is_testmode_skb(dev, skb, &hw)) {
35 struct mt76_phy *phy = hw->priv;
36
37- if (tx_info.skb == phy->test.tx_skb)
38- phy->test.tx_done--;
39+ phy->test.tx_done--;
40 }
41 #endif
42
43diff --git a/mac80211.c b/mac80211.c
developerf1b69ea2022-07-04 10:54:39 +080044index a7e082f7..9984def5 100644
developer4c6b6002022-05-30 16:36:44 +080045--- a/mac80211.c
46+++ b/mac80211.c
47@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
48 CHAN5G(60, 5300),
49 CHAN5G(64, 5320),
50
51+ CHAN5G(68, 5340),
52+ CHAN5G(80, 5400),
53+ CHAN5G(84, 5420),
54+ CHAN5G(88, 5440),
55+ CHAN5G(92, 5460),
56+ CHAN5G(96, 5480),
57+
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
61@@ -75,6 +82,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
62 CHAN5G(165, 5825),
63 CHAN5G(169, 5845),
64 CHAN5G(173, 5865),
65+
66+ CHAN5G(184, 4920),
67+ CHAN5G(188, 4940),
68+ CHAN5G(192, 4960),
69+ CHAN5G(196, 4980),
70 };
71
72 static const struct ieee80211_channel mt76_channels_6ghz[] = {
73diff --git a/mt76.h b/mt76.h
developerf1b69ea2022-07-04 10:54:39 +080074index 8325409a..4c8a671f 100644
developer4c6b6002022-05-30 16:36:44 +080075--- a/mt76.h
76+++ b/mt76.h
77@@ -602,6 +602,21 @@ struct mt76_testmode_ops {
78 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
79 enum mt76_testmode_state new_state);
80 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
81+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
82+};
83+
84+struct mt76_testmode_entry_data {
85+ struct sk_buff *tx_skb;
86+
87+ u16 tx_mpdu_len;
88+ u8 tx_rate_idx;
89+ u8 tx_rate_nss;
90+ u8 tx_rate_ldpc;
91+
92+ u8 addr[3][ETH_ALEN];
93+ u8 aid;
94+ u8 ru_alloc;
95+ u8 ru_idx;
96 };
97
98 #define MT_TM_FW_RX_COUNT BIT(0)
99@@ -610,16 +625,11 @@ struct mt76_testmode_data {
100 enum mt76_testmode_state state;
101
102 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
103- struct sk_buff *tx_skb;
104
105 u32 tx_count;
106- u16 tx_mpdu_len;
107
108 u8 tx_rate_mode;
109- u8 tx_rate_idx;
110- u8 tx_rate_nss;
111 u8 tx_rate_sgi;
112- u8 tx_rate_ldpc;
113 u8 tx_rate_stbc;
114 u8 tx_ltf;
115
116@@ -635,10 +645,37 @@ struct mt76_testmode_data {
117 u8 tx_power[4];
118 u8 tx_power_control;
119
120- u8 addr[3][ETH_ALEN];
121+ struct list_head tm_entry_list;
122+ struct mt76_wcid *cur_entry;
123+ u8 entry_num;
124+ union {
125+ struct mt76_testmode_entry_data ed;
126+ struct {
127+ /* must be the same as mt76_testmode_entry_data */
128+ struct sk_buff *tx_skb;
129+
130+ u16 tx_mpdu_len;
131+ u8 tx_rate_idx;
132+ u8 tx_rate_nss;
133+ u8 tx_rate_ldpc;
134+
135+ u8 addr[3][ETH_ALEN];
136+ u8 aid;
137+ u8 ru_alloc;
138+ u8 ru_idx;
139+ };
140+ };
141
142 u8 flag;
143
144+ struct {
145+ u8 type;
146+ u8 enable;
147+ } cfg;
148+
149+ u8 txbf_act;
150+ u16 txbf_param[8];
151+
152 u32 tx_pending;
153 u32 tx_queued;
154 u16 tx_queued_limit;
155@@ -1120,14 +1157,69 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
156 #endif
157 }
158
159+#ifdef CONFIG_NL80211_TESTMODE
160+static inline struct mt76_wcid *
161+mt76_testmode_first_entry(struct mt76_phy *phy)
162+{
163+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
164+ return &phy->dev->global_wcid;
165+
166+ return list_first_entry(&phy->test.tm_entry_list,
167+ typeof(struct mt76_wcid),
168+ list);
169+}
170+
171+static inline struct mt76_testmode_entry_data *
172+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
173+{
174+ if (!wcid)
175+ return NULL;
176+ if (wcid == &phy->dev->global_wcid)
177+ return &phy->test.ed;
178+
179+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
180+ phy->hw->sta_data_size);
181+}
182+
183+#define mt76_tm_for_each_entry(phy, wcid, ed) \
184+ for (wcid = mt76_testmode_first_entry(phy), \
185+ ed = mt76_testmode_entry_data(phy, wcid); \
186+ ((phy->test.aid && \
187+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
188+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
189+ wcid = list_next_entry(wcid, list), \
190+ ed = mt76_testmode_entry_data(phy, wcid))
191+#endif
192+
193+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
194+ struct sk_buff *skb)
195+{
196+#ifdef CONFIG_NL80211_TESTMODE
197+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
198+ struct mt76_wcid *wcid;
199+
200+ if (skb == ed->tx_skb)
201+ return true;
202+
203+ mt76_tm_for_each_entry(phy, wcid, ed)
204+ if (skb == ed->tx_skb)
205+ return true;
206+ return false;
207+#else
208+ return false;
209+#endif
210+}
211+
212 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
213 struct sk_buff *skb,
214 struct ieee80211_hw **hw)
215 {
216 #ifdef CONFIG_NL80211_TESTMODE
217- if (skb == dev->phy.test.tx_skb)
218+ if (mt76_testmode_enabled(&dev->phy) &&
219+ __mt76_is_testmode_skb(&dev->phy, skb))
220 *hw = dev->phy.hw;
221- else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
222+ else if (dev->phy2 && mt76_testmode_enabled(dev->phy2) &&
223+ __mt76_is_testmode_skb(dev->phy2, skb))
224 *hw = dev->phy2->hw;
225 else
226 return false;
227@@ -1227,7 +1319,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
228 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
229 struct netlink_callback *cb, void *data, int len);
230 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
231-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
232+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
233+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
234
235 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
236 {
237diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
developerf1b69ea2022-07-04 10:54:39 +0800238index 261181dc..cd350689 100644
developer4c6b6002022-05-30 16:36:44 +0800239--- a/mt76_connac_mcu.c
240+++ b/mt76_connac_mcu.c
developerf64861f2022-06-22 11:44:53 +0800241@@ -391,6 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
developer4c6b6002022-05-30 16:36:44 +0800242 switch (vif->type) {
243 case NL80211_IFTYPE_MESH_POINT:
244 case NL80211_IFTYPE_AP:
245+ case NL80211_IFTYPE_MONITOR:
246 if (vif->p2p)
247 conn_type = CONNECTION_P2P_GC;
248 else
developerf64861f2022-06-22 11:44:53 +0800249@@ -572,6 +573,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800250 rx->rca2 = 1;
251 rx->rv = 1;
252
253+ if (vif->type == NL80211_IFTYPE_MONITOR)
254+ rx->rca1 = 0;
255+
256 if (!is_connac_v1(dev))
257 return;
258
259diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
developerd59e4772022-07-14 13:48:49 +0800260index 25aeedc2..cb1e94a3 100644
developer4c6b6002022-05-30 16:36:44 +0800261--- a/mt76_connac_mcu.h
262+++ b/mt76_connac_mcu.h
developerf64861f2022-06-22 11:44:53 +0800263@@ -967,6 +967,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800264 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
265 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
266 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
267+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
268 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
269 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
270 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
developerf64861f2022-06-22 11:44:53 +0800271@@ -1144,6 +1145,7 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800272 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
273 /* for vendor csi and air monitor */
274 MCU_EXT_CMD_SMESH_CTRL = 0xae,
275+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
276 MCU_EXT_CMD_CERT_CFG = 0xb7,
277 MCU_EXT_CMD_CSI_CTRL = 0xc2,
278 };
279diff --git a/mt7915/init.c b/mt7915/init.c
developerf1b69ea2022-07-04 10:54:39 +0800280index 0d5109a3..b549fa04 100644
developer4c6b6002022-05-30 16:36:44 +0800281--- a/mt7915/init.c
282+++ b/mt7915/init.c
developerf64861f2022-06-22 11:44:53 +0800283@@ -576,7 +576,7 @@ static void mt7915_init_work(struct work_struct *work)
developer4c6b6002022-05-30 16:36:44 +0800284 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
285 init_work);
286
287- mt7915_mcu_set_eeprom(dev);
288+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
289 mt7915_mac_init(dev);
290 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
291 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
292diff --git a/mt7915/mac.c b/mt7915/mac.c
developerf1b69ea2022-07-04 10:54:39 +0800293index f13456bf..5e5df23d 100644
developer4c6b6002022-05-30 16:36:44 +0800294--- a/mt7915/mac.c
295+++ b/mt7915/mac.c
developerf64861f2022-06-22 11:44:53 +0800296@@ -565,17 +565,39 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800297 {
298 #ifdef CONFIG_NL80211_TESTMODE
299 struct mt76_testmode_data *td = &phy->mt76->test;
300+ struct mt76_testmode_entry_data *ed;
301+ struct mt76_wcid *wcid;
302 const struct ieee80211_rate *r;
303- u8 bw, mode, nss = td->tx_rate_nss;
304- u8 rate_idx = td->tx_rate_idx;
305+ u8 bw, mode, nss, rate_idx, ldpc;
306 u16 rateval = 0;
307 u32 val;
308 bool cck = false;
309 int band;
310
311- if (skb != phy->mt76->test.tx_skb)
312+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
313+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
314+ phy->test.spe_idx));
315+
316+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
317+ txwi[1] |= cpu_to_le32(BIT(18));
318+ txwi[2] = 0;
319+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
320+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
321+
322+ return;
323+ }
324+
325+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
326+ if (ed->tx_skb == skb)
327+ break;
328+
329+ if (!ed)
330 return;
331
332+ nss = ed->tx_rate_nss;
333+ rate_idx = ed->tx_rate_idx;
334+ ldpc = ed->tx_rate_ldpc;
335+
336 switch (td->tx_rate_mode) {
337 case MT76_TM_TX_MODE_HT:
338 nss = 1 + (rate_idx >> 3);
developerf64861f2022-06-22 11:44:53 +0800339@@ -664,14 +686,13 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer4c6b6002022-05-30 16:36:44 +0800340 if (mode >= MT_PHY_TYPE_HE_SU)
341 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
342
343- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
344+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
345 val |= MT_TXD6_LDPC;
346
347- txwi[1] &= ~cpu_to_le32(MT_TXD1_VTA);
348- txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
349+ if (phy->test.bf_en)
350+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
351+
352 txwi[6] |= cpu_to_le32(val);
353- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
354- phy->test.spe_idx));
355 #endif
356 }
357
358diff --git a/mt7915/main.c b/mt7915/main.c
developerd59e4772022-07-14 13:48:49 +0800359index 60d990f3..dd0b3cc2 100644
developer4c6b6002022-05-30 16:36:44 +0800360--- a/mt7915/main.c
361+++ b/mt7915/main.c
developer4721e252022-06-21 16:41:28 +0800362@@ -224,7 +224,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
developer4c6b6002022-05-30 16:36:44 +0800363 mvif->phy = phy;
364 mvif->mt76.band_idx = phy->band_idx;
365
366- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
367+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
368 if (ext_phy)
369 mvif->mt76.wmm_idx += 2;
370
371diff --git a/mt7915/mcu.c b/mt7915/mcu.c
developerd59e4772022-07-14 13:48:49 +0800372index 0f71356b..c65fee98 100644
developer4c6b6002022-05-30 16:36:44 +0800373--- a/mt7915/mcu.c
374+++ b/mt7915/mcu.c
developerf64861f2022-06-22 11:44:53 +0800375@@ -434,6 +434,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800376 case MCU_EXT_EVENT_BCC_NOTIFY:
377 mt7915_mcu_rx_bcc_notify(dev, skb);
378 break;
379+#ifdef CONFIG_NL80211_TESTMODE
380+ case MCU_EXT_EVENT_BF_STATUS_READ:
381+ mt7915_tm_txbf_status_read(dev, skb);
382+ break;
383+#endif
384 default:
385 break;
386 }
developerf64861f2022-06-22 11:44:53 +0800387@@ -465,6 +470,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer4c6b6002022-05-30 16:36:44 +0800388 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
389 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
390 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
391+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
392 !rxd->seq)
393 mt7915_mcu_rx_unsolicited_event(dev, skb);
394 else
developerd59e4772022-07-14 13:48:49 +0800395@@ -2831,14 +2837,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
developer4c6b6002022-05-30 16:36:44 +0800396 return 0;
397 }
398
399-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
400+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
401 {
402 struct mt7915_mcu_eeprom req = {
403 .buffer_mode = EE_MODE_EFUSE,
404 .format = EE_FORMAT_WHOLE,
405 };
406
407- if (dev->flash_mode)
408+ if (flash_mode)
409 return mt7915_mcu_set_eeprom_flash(dev);
410
411 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
412diff --git a/mt7915/mcu.h b/mt7915/mcu.h
developerd59e4772022-07-14 13:48:49 +0800413index 0a77ad0d..ad85e56c 100644
developer4c6b6002022-05-30 16:36:44 +0800414--- a/mt7915/mcu.h
415+++ b/mt7915/mcu.h
developerf64861f2022-06-22 11:44:53 +0800416@@ -8,10 +8,15 @@
developer4c6b6002022-05-30 16:36:44 +0800417
418 enum {
419 MCU_ATE_SET_TRX = 0x1,
420+ MCU_ATE_SET_TSSI = 0x5,
421+ MCU_ATE_SET_DPD = 0x6,
422+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
423+ MCU_ATE_SET_THERMAL_COMP = 0x8,
424 MCU_ATE_SET_FREQ_OFFSET = 0xa,
425 MCU_ATE_SET_PHY_COUNT = 0x11,
426 MCU_ATE_SET_SLOT_TIME = 0x13,
427 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
428+ MCU_ATE_SET_MU_RX_AID = 0x1e,
429 };
430
developerf64861f2022-06-22 11:44:53 +0800431 struct mt7915_mcu_thermal_ctrl {
developerd59e4772022-07-14 13:48:49 +0800432@@ -432,6 +437,12 @@ enum {
developer4c6b6002022-05-30 16:36:44 +0800433
434 enum {
435 MT_BF_SOUNDING_ON = 1,
436+ MT_BF_DATA_PACKET_APPLY = 2,
437+ MT_BF_PFMU_TAG_READ = 5,
438+ MT_BF_PFMU_TAG_WRITE = 6,
439+ MT_BF_PHASE_CAL = 14,
440+ MT_BF_IBF_PHASE_COMP = 15,
441+ MT_BF_PROFILE_WRITE_ALL = 17,
442 MT_BF_TYPE_UPDATE = 20,
443 MT_BF_MODULE_UPDATE = 25
444 };
developerd59e4772022-07-14 13:48:49 +0800445@@ -665,10 +676,19 @@ struct mt7915_muru {
developer4c6b6002022-05-30 16:36:44 +0800446 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
447
developerf64861f2022-06-22 11:44:53 +0800448 /* Common Config */
developer4c6b6002022-05-30 16:36:44 +0800449-#define MURU_COMM_PPDU_FMT BIT(0)
450-#define MURU_COMM_SCH_TYPE BIT(1)
451-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
developer4c6b6002022-05-30 16:36:44 +0800452-/* DL&UL User config*/
developer4c6b6002022-05-30 16:36:44 +0800453+/* #define MURU_COMM_PPDU_FMT BIT(0) */
454+/* #define MURU_COMM_SCH_TYPE BIT(1) */
455+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
developer4721e252022-06-21 16:41:28 +0800456+#define MURU_COMM_PPDU_FMT BIT(0)
457+#define MURU_COMM_SCH_TYPE BIT(1)
458+#define MURU_COMM_BAND BIT(2)
459+#define MURU_COMM_WMM BIT(3)
460+#define MURU_COMM_SPE_IDX BIT(4)
461+#define MURU_COMM_PROC_TYPE BIT(5)
462+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
463+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
developer4c6b6002022-05-30 16:36:44 +0800464+
465+/* DL&UL User config */
466 #define MURU_USER_CNT BIT(4)
467
468 enum {
469diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developerf1b69ea2022-07-04 10:54:39 +0800470index 6e140e2d..6d1dbdbd 100644
developer4c6b6002022-05-30 16:36:44 +0800471--- a/mt7915/mmio.c
472+++ b/mt7915/mmio.c
developerf64861f2022-06-22 11:44:53 +0800473@@ -76,6 +76,7 @@ static const u32 mt7915_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800474 [ARB_DRNGR0] = 0x194,
475 [ARB_SCR] = 0x080,
476 [RMAC_MIB_AIRTIME14] = 0x3b8,
477+ [AGG_AALCR0] = 0x048,
478 [AGG_AWSCR0] = 0x05c,
479 [AGG_PCR0] = 0x06c,
480 [AGG_ACR0] = 0x084,
developerf64861f2022-06-22 11:44:53 +0800481@@ -150,6 +151,7 @@ static const u32 mt7916_offs[] = {
developer4c6b6002022-05-30 16:36:44 +0800482 [ARB_DRNGR0] = 0x1e0,
483 [ARB_SCR] = 0x000,
484 [RMAC_MIB_AIRTIME14] = 0x0398,
485+ [AGG_AALCR0] = 0x028,
486 [AGG_AWSCR0] = 0x030,
487 [AGG_PCR0] = 0x040,
488 [AGG_ACR0] = 0x054,
489diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developerf1b69ea2022-07-04 10:54:39 +0800490index 07a1c9ce..7c7d6bd3 100644
developer4c6b6002022-05-30 16:36:44 +0800491--- a/mt7915/mt7915.h
492+++ b/mt7915/mt7915.h
developerf64861f2022-06-22 11:44:53 +0800493@@ -303,6 +303,9 @@ struct mt7915_phy {
developer4c6b6002022-05-30 16:36:44 +0800494 u8 last_snr;
495
496 u8 spe_idx;
497+
498+ bool bf_en;
499+ bool bf_ever_en;
500 } test;
501 #endif
502
developerf64861f2022-06-22 11:44:53 +0800503@@ -394,6 +397,14 @@ struct mt7915_dev {
developer4c6b6002022-05-30 16:36:44 +0800504 void __iomem *dcm;
505 void __iomem *sku;
506
507+#ifdef CONFIG_NL80211_TESTMODE
508+ struct {
509+ void *txbf_phase_cal;
510+ void *txbf_pfmu_data;
511+ void *txbf_pfmu_tag;
512+ } test;
513+#endif
514+
515 #ifdef MTK_DEBUG
516 u16 wlan_idx;
517 struct {
developer4721e252022-06-21 16:41:28 +0800518@@ -571,7 +582,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
developer4c6b6002022-05-30 16:36:44 +0800519 struct ieee80211_vif *vif,
520 struct ieee80211_sta *sta,
521 void *data, u32 field);
522-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
523+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
524 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
525 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
526 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
developer4721e252022-06-21 16:41:28 +0800527@@ -604,6 +615,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
developer4c6b6002022-05-30 16:36:44 +0800528 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
529 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
530 void mt7915_mcu_exit(struct mt7915_dev *dev);
531+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
532
533 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
534 {
535diff --git a/mt7915/regs.h b/mt7915/regs.h
developerf1b69ea2022-07-04 10:54:39 +0800536index 47bae86e..444440e1 100644
developer4c6b6002022-05-30 16:36:44 +0800537--- a/mt7915/regs.h
538+++ b/mt7915/regs.h
developerf64861f2022-06-22 11:44:53 +0800539@@ -51,6 +51,7 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800540 ARB_DRNGR0,
541 ARB_SCR,
542 RMAC_MIB_AIRTIME14,
543+ AGG_AALCR0,
544 AGG_AWSCR0,
545 AGG_PCR0,
546 AGG_ACR0,
developerf64861f2022-06-22 11:44:53 +0800547@@ -467,6 +468,8 @@ enum offs_rev {
developer4c6b6002022-05-30 16:36:44 +0800548 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
549 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
550
551+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
552+ (_n) * 4))
553 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
554 (_n) * 4))
555 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
556diff --git a/mt7915/testmode.c b/mt7915/testmode.c
developerd59e4772022-07-14 13:48:49 +0800557index b2eee3f2..3efd1ff0 100644
developer4c6b6002022-05-30 16:36:44 +0800558--- a/mt7915/testmode.c
559+++ b/mt7915/testmode.c
560@@ -9,6 +9,9 @@
561 enum {
562 TM_CHANGED_TXPOWER,
563 TM_CHANGED_FREQ_OFFSET,
564+ TM_CHANGED_AID,
565+ TM_CHANGED_CFG,
566+ TM_CHANGED_TXBF_ACT,
567
568 /* must be last */
569 NUM_TM_CHANGED
570@@ -17,6 +20,9 @@ enum {
571 static const u8 tm_change_map[] = {
572 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
573 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
574+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
575+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
576+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
577 };
578
579 struct reg_band {
developerc6f56bb2022-06-14 18:36:30 +0800580@@ -33,6 +39,38 @@ struct reg_band {
developer4c6b6002022-05-30 16:36:44 +0800581 #define TM_REG_MAX_ID 20
582 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
583
developerc6f56bb2022-06-14 18:36:30 +0800584+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
585+
developer4c6b6002022-05-30 16:36:44 +0800586+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
587+{
588+ static const u8 width_to_bw[] = {
589+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
590+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
591+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
592+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
593+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
594+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
595+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
596+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
597+ };
598+
599+ if (width >= ARRAY_SIZE(width_to_bw))
600+ return 0;
601+
602+ return width_to_bw[width];
603+}
604+
605+static void
606+mt7915_tm_update_channel(struct mt7915_phy *phy)
607+{
608+ mutex_unlock(&phy->dev->mt76.mutex);
609+ mt7915_set_channel(phy);
610+ mutex_lock(&phy->dev->mt76.mutex);
611+
612+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
developerc6f56bb2022-06-14 18:36:30 +0800613+
614+ mt7915_tm_update_entry(phy);
developer4c6b6002022-05-30 16:36:44 +0800615+}
616
617 static int
618 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
developerc6f56bb2022-06-14 18:36:30 +0800619@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
developer4c6b6002022-05-30 16:36:44 +0800620 }
621
622 static int
623-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
624+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
625 {
626+ struct mt76_testmode_entry_data *ed;
627+ struct mt76_wcid *wcid;
628 struct mt7915_dev *dev = phy->dev;
629 struct mt7915_tm_cmd req = {
630 .testmode_en = 1,
631 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
632- .param.clean.wcid = wcid,
633 .param.clean.band = phy != &dev->phy,
634 };
635
636- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
637- sizeof(req), false);
638+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
639+ int ret;
640+
641+ req.param.clean.wcid = wcid->idx;
642+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
643+ &req, sizeof(req), false);
644+ if (ret)
645+ return ret;
646+ }
647+
648+ return 0;
649 }
650
651 static int
developerf64861f2022-06-22 11:44:53 +0800652@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
developer4c6b6002022-05-30 16:36:44 +0800653 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
654 }
655
656+static int
657+mt7915_tm_set_cfg(struct mt7915_phy *phy)
658+{
659+ static const u8 cfg_cmd[] = {
660+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
661+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
662+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
663+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
664+ };
665+ struct mt76_testmode_data *td = &phy->mt76->test;
666+ struct mt7915_dev *dev = phy->dev;
667+ struct mt7915_tm_cmd req = {
668+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
669+ .param_idx = cfg_cmd[td->cfg.type],
670+ .param.cfg.enable = td->cfg.enable,
671+ .param.cfg.band = phy->band_idx,
672+ };
673+
674+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
675+ sizeof(req), false);
676+}
677+
678+static int
679+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
680+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
681+ u8 nc, bool ebf)
682+{
683+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
684+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
685+ struct mt7915_dev *dev = phy->dev;
686+ struct sk_buff *skb;
687+ struct sta_rec_bf *bf;
688+ struct tlv *tlv;
689+ u8 ndp_rate;
690+
691+ if (nr == 1)
692+ ndp_rate = 8;
693+ else if (nr == 2)
694+ ndp_rate = 16;
695+ else
696+ ndp_rate = 24;
697+
698+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
699+ &msta->wcid);
700+ if (IS_ERR(skb))
701+ return PTR_ERR(skb);
702+
703+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
704+ bf = (struct sta_rec_bf *)tlv;
705+
706+ bf->pfmu = cpu_to_le16(pfmu_idx);
707+ bf->sounding_phy = 1;
708+ bf->bf_cap = ebf;
709+ bf->ncol = nc;
710+ bf->nrow = nr;
711+ bf->ndp_rate = ndp_rate;
712+ bf->ibf_timeout = 0xff;
713+ bf->tx_mode = MT_PHY_TYPE_HT;
714+
715+ if (ebf) {
716+ bf->mem[0].row = 0;
717+ bf->mem[1].row = 1;
718+ bf->mem[2].row = 2;
719+ bf->mem[3].row = 3;
720+ } else {
721+ bf->mem[0].row = 4;
722+ bf->mem[1].row = 5;
723+ bf->mem[2].row = 6;
724+ bf->mem[3].row = 7;
725+ }
726+
727+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
728+ MCU_EXT_CMD(STA_REC_UPDATE), true);
729+}
730+
731+static int
732+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
733+{
734+ struct mt76_testmode_data *td = &phy->mt76->test;
735+ struct mt76_testmode_entry_data *ed;
736+ struct ieee80211_sband_iftype_data *sdata;
737+ struct ieee80211_supported_band *sband;
738+ struct ieee80211_sta *sta;
739+ struct mt7915_sta *msta;
740+ int tid, ret;
741+
742+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
743+ return -EINVAL;
744+
745+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
746+ sizeof(*ed), GFP_KERNEL);
747+ if (!sta)
748+ return -ENOMEM;
749+
750+ msta = (struct mt7915_sta *)sta->drv_priv;
751+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
752+ memcpy(ed, &td->ed, sizeof(*ed));
753+
754+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
755+ sband = &phy->mt76->sband_5g.sband;
756+ sdata = phy->iftype[NL80211_BAND_5GHZ];
757+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
758+ sband = &phy->mt76->sband_6g.sband;
759+ sdata = phy->iftype[NL80211_BAND_6GHZ];
760+ } else {
761+ sband = &phy->mt76->sband_2g.sband;
762+ sdata = phy->iftype[NL80211_BAND_2GHZ];
763+ }
764+
765+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
766+ if (phy->test.bf_en) {
767+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
768+
769+ memcpy(sta->addr, addr, ETH_ALEN);
770+ }
771+
772+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
773+ memcpy(&sta->ht_cap, &sband->ht_cap, sizeof(sta->ht_cap));
774+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
775+ memcpy(&sta->vht_cap, &sband->vht_cap, sizeof(sta->vht_cap));
776+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
777+ memcpy(&sta->he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
778+ sizeof(sta->he_cap));
779+ sta->aid = aid;
780+ sta->wme = 1;
781+
782+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
783+ if (ret) {
784+ kfree(sta);
785+ return ret;
786+ }
787+
788+ /* prevent from starting tx ba session */
789+ for (tid = 0; tid < 8; tid++)
790+ set_bit(tid, &msta->ampdu_state);
791+
792+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
793+ td->entry_num++;
794+
795+ return 0;
796+}
797+
798+static void
799+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
800+{
801+ struct mt76_testmode_data *td = &phy->mt76->test;
802+ struct mt76_wcid *wcid, *tmp;
803+
804+ if (list_empty(&td->tm_entry_list))
805+ return;
806+
807+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
developerc6f56bb2022-06-14 18:36:30 +0800808+ struct mt76_testmode_entry_data *ed;
developer4c6b6002022-05-30 16:36:44 +0800809+ struct mt7915_dev *dev = phy->dev;
developerc6f56bb2022-06-14 18:36:30 +0800810+ struct ieee80211_sta *sta;
developer4c6b6002022-05-30 16:36:44 +0800811+
developerc6f56bb2022-06-14 18:36:30 +0800812+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
813+ if (aid && ed->aid != aid)
814+ continue;
815+
816+ sta = wcid_to_sta(wcid);
developer4c6b6002022-05-30 16:36:44 +0800817+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
818+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
819+
820+ list_del_init(&wcid->list);
821+ kfree(sta);
822+ phy->mt76->test.entry_num--;
823+ }
824+}
825+
826+static int
827+mt7915_tm_set_entry(struct mt7915_phy *phy)
828+{
829+ struct mt76_testmode_data *td = &phy->mt76->test;
830+ struct mt76_testmode_entry_data *ed;
831+ struct mt76_wcid *wcid;
832+
833+ if (!td->aid) {
834+ if (td->state > MT76_TM_STATE_IDLE)
835+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
836+ mt7915_tm_entry_remove(phy, td->aid);
837+ return 0;
838+ }
839+
840+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
841+ if (ed->aid == td->aid) {
842+ struct sk_buff *skb;
843+
844+ local_bh_disable();
845+ skb = ed->tx_skb;
846+ memcpy(ed, &td->ed, sizeof(*ed));
847+ ed->tx_skb = skb;
848+ local_bh_enable();
849+
850+ return 0;
851+ }
852+ }
853+
854+ return mt7915_tm_entry_add(phy, td->aid);
855+}
856+
developerc6f56bb2022-06-14 18:36:30 +0800857+static void
858+mt7915_tm_update_entry(struct mt7915_phy *phy)
859+{
860+ struct mt76_testmode_data *td = &phy->mt76->test;
861+ struct mt76_testmode_entry_data *ed, tmp;
862+ struct mt76_wcid *wcid, *last;
863+
864+ if (!td->aid || phy->test.bf_en)
865+ return;
866+
867+ memcpy(&tmp, &td->ed, sizeof(tmp));
868+ last = list_last_entry(&td->tm_entry_list,
869+ struct mt76_wcid, list);
870+
871+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
872+ memcpy(&td->ed, ed, sizeof(td->ed));
873+ mt7915_tm_entry_remove(phy, td->aid);
874+ mt7915_tm_entry_add(phy, td->aid);
875+ if (wcid == last)
876+ break;
877+ }
878+
879+ memcpy(&td->ed, &tmp, sizeof(td->ed));
880+}
881+
developer4c6b6002022-05-30 16:36:44 +0800882+static int
883+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
884+{
885+ struct mt76_testmode_data *td = &phy->mt76->test;
886+ struct mt7915_dev *dev = phy->dev;
887+ bool enable = val[0];
888+ void *phase_cal, *pfmu_data, *pfmu_tag;
889+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
890+
891+ if (!enable) {
892+ phy->test.bf_en = 0;
893+ return 0;
894+ }
895+
896+ if (!dev->test.txbf_phase_cal) {
897+ phase_cal = devm_kzalloc(dev->mt76.dev,
898+ sizeof(struct mt7915_tm_txbf_phase) *
899+ MAX_PHASE_GROUP_NUM,
900+ GFP_KERNEL);
901+ if (!phase_cal)
902+ return -ENOMEM;
903+
904+ dev->test.txbf_phase_cal = phase_cal;
905+ }
906+
907+ if (!dev->test.txbf_pfmu_data) {
908+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
909+ if (!pfmu_data)
910+ return -ENOMEM;
911+
912+ dev->test.txbf_pfmu_data = pfmu_data;
913+ }
914+
915+ if (!dev->test.txbf_pfmu_tag) {
916+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
917+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
918+ if (!pfmu_tag)
919+ return -ENOMEM;
920+
921+ dev->test.txbf_pfmu_tag = pfmu_tag;
922+ }
923+
924+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
925+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
926+
927+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
928+ td->tx_mpdu_len = 1024;
929+ td->tx_rate_sgi = 0;
930+ td->tx_ipg = 100;
931+ phy->test.bf_en = 1;
932+
933+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
934+}
935+
936+static int
937+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
938+{
939+ struct mt7915_dev *dev = phy->dev;
940+ struct {
941+ u8 category;
942+ u8 wlan_idx_lo;
943+ u8 bw;
944+ u8 jp_band;
945+ u8 dbdc_idx;
946+ bool read_from_e2p;
947+ bool disable;
948+ u8 wlan_idx_hi;
949+ u8 buf[40];
950+ } __packed req = {
951+ .category = MT_BF_IBF_PHASE_COMP,
952+ .bw = val[0],
953+ .jp_band = (val[2] == 1) ? 1 : 0,
954+ .dbdc_idx = phy->band_idx,
955+ .read_from_e2p = val[3],
956+ .disable = val[4],
957+ };
958+ struct mt7915_tm_txbf_phase *phase =
959+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
960+
961+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
962+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
963+
964+ pr_info("ibf cal process: phase comp info\n");
965+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
966+ &req, sizeof(req), 0);
967+
968+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
969+ sizeof(req), true);
970+}
971+
972+static int
973+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
974+{
975+ struct mt7915_dev *dev = phy->dev;
976+ struct {
977+ u8 format_id;
978+ u8 pfmu_idx;
979+ bool bfer;
980+ u8 dbdc_idx;
981+ } __packed req = {
982+ .format_id = MT_BF_PFMU_TAG_READ,
983+ .pfmu_idx = pfmu_idx,
984+ .bfer = 1,
985+ .dbdc_idx = phy != &dev->phy,
986+ };
987+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
988+
989+ tag->t1.pfmu_idx = 0;
990+
991+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
992+ sizeof(req), true);
993+}
994+
995+static int
996+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
997+ struct mt7915_tm_pfmu_tag *tag)
998+{
999+ struct mt7915_dev *dev = phy->dev;
1000+ struct {
1001+ u8 format_id;
1002+ u8 pfmu_idx;
1003+ bool bfer;
1004+ u8 dbdc_idx;
1005+ u8 buf[64];
1006+ } __packed req = {
1007+ .format_id = MT_BF_PFMU_TAG_WRITE,
1008+ .pfmu_idx = pfmu_idx,
1009+ .bfer = 1,
1010+ .dbdc_idx = phy != &dev->phy,
1011+ };
1012+
1013+ memcpy(req.buf, tag, sizeof(*tag));
1014+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1015+
1016+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1017+ sizeof(req), false);
1018+}
1019+
1020+static int
1021+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1022+ bool ibf, bool phase_cal)
1023+{
1024+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1025+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1026+ struct mt7915_dev *dev = phy->dev;
1027+ struct {
1028+ u8 category;
1029+ u8 wlan_idx_lo;
1030+ bool ebf;
1031+ bool ibf;
1032+ bool mu_txbf;
1033+ bool phase_cal;
1034+ u8 wlan_idx_hi;
1035+ u8 _rsv;
1036+ } __packed req = {
1037+ .category = MT_BF_DATA_PACKET_APPLY,
1038+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1039+ .ebf = ebf,
1040+ .ibf = ibf,
1041+ .phase_cal = phase_cal,
1042+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1043+ };
1044+
1045+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1046+ sizeof(req), false);
1047+}
1048+
1049+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1050+ struct mt76_wcid *wcid)
1051+{
1052+ struct mt7915_dev *dev = phy->dev;
1053+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1054+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1055+ struct sta_phy rate = {};
1056+
1057+ if (!sta)
1058+ return 0;
1059+
1060+ rate.type = MT_PHY_TYPE_HT;
1061+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1062+ rate.nss = ed->tx_rate_nss;
1063+ rate.mcs = ed->tx_rate_idx;
1064+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1065+
1066+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1067+ &rate, RATE_PARAM_FIXED);
1068+}
1069+
1070+static int
1071+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1072+{
1073+ bool bf_on = val[0], update = val[3];
1074+ /* u16 wlan_idx = val[2]; */
1075+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1076+ struct mt76_testmode_data *td = &phy->mt76->test;
1077+ struct mt76_wcid *wcid;
1078+
1079+ if (bf_on) {
1080+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1081+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1082+ tag->t1.invalid_prof = false;
1083+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1084+
1085+ phy->test.bf_ever_en = true;
1086+
1087+ if (update)
1088+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1089+ } else {
1090+ if (!phy->test.bf_ever_en) {
1091+ if (update)
1092+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1093+ } else {
1094+ phy->test.bf_ever_en = false;
1095+
1096+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1097+ tag->t1.invalid_prof = true;
1098+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1099+ }
1100+ }
1101+
1102+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1103+ mt7915_tm_txbf_set_rate(phy, wcid);
1104+
1105+ return 0;
1106+}
1107+
1108+static int
1109+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1110+{
1111+ static const u8 mode_to_lm[] = {
1112+ [MT76_TM_TX_MODE_CCK] = 0,
1113+ [MT76_TM_TX_MODE_OFDM] = 0,
1114+ [MT76_TM_TX_MODE_HT] = 1,
1115+ [MT76_TM_TX_MODE_VHT] = 2,
1116+ [MT76_TM_TX_MODE_HE_SU] = 3,
1117+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1118+ [MT76_TM_TX_MODE_HE_TB] = 3,
1119+ [MT76_TM_TX_MODE_HE_MU] = 3,
1120+ };
1121+ struct mt76_testmode_data *td = &phy->mt76->test;
1122+ struct mt76_wcid *wcid;
1123+ struct ieee80211_vif *vif = phy->monitor_vif;
1124+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1125+ u8 pfmu_idx = val[0], nc = val[2], nr;
1126+ int ret;
1127+
1128+ if (td->tx_antenna_mask == 3)
1129+ nr = 1;
1130+ else if (td->tx_antenna_mask == 7)
1131+ nr = 2;
1132+ else
1133+ nr = 3;
1134+
1135+ memset(tag, 0, sizeof(*tag));
1136+ tag->t1.pfmu_idx = pfmu_idx;
1137+ tag->t1.ebf = ebf;
1138+ tag->t1.nr = nr;
1139+ tag->t1.nc = nc;
1140+ tag->t1.invalid_prof = true;
1141+
1142+ tag->t1.snr_sts4 = 0xc0;
1143+ tag->t1.snr_sts5 = 0xff;
1144+ tag->t1.snr_sts6 = 0xff;
1145+ tag->t1.snr_sts7 = 0xff;
1146+
1147+ if (ebf) {
1148+ tag->t1.row_id1 = 0;
1149+ tag->t1.row_id2 = 1;
1150+ tag->t1.row_id3 = 2;
1151+ tag->t1.row_id4 = 3;
1152+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1153+ } else {
1154+ tag->t1.row_id1 = 4;
1155+ tag->t1.row_id2 = 5;
1156+ tag->t1.row_id3 = 6;
1157+ tag->t1.row_id4 = 7;
1158+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1159+
1160+ tag->t2.ibf_timeout = 0xff;
1161+ tag->t2.ibf_nr = nr;
1162+ }
1163+
1164+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1165+ if (ret)
1166+ return ret;
1167+
1168+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1169+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1170+ if (ret)
1171+ return ret;
1172+
1173+ if (!ebf)
1174+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1175+
1176+ return 0;
1177+}
1178+
1179+static int
1180+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1181+{
1182+#define GROUP_L 0
1183+#define GROUP_M 1
1184+#define GROUP_H 2
1185+ struct mt7915_dev *dev = phy->dev;
1186+ struct {
1187+ u8 category;
1188+ u8 group_l_m_n;
1189+ u8 group;
1190+ bool sx2;
1191+ u8 cal_type;
1192+ u8 lna_gain_level;
1193+ u8 _rsv[2];
1194+ } __packed req = {
1195+ .category = MT_BF_PHASE_CAL,
1196+ .group = val[0],
1197+ .group_l_m_n = val[1],
1198+ .sx2 = val[2],
1199+ .cal_type = val[3],
1200+ .lna_gain_level = 0, /* for test purpose */
1201+ };
1202+ struct mt7915_tm_txbf_phase *phase =
1203+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1204+
1205+ phase[req.group].status = 0;
1206+
1207+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1208+ sizeof(req), true);
1209+}
1210+
1211+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1212+{
1213+#define BF_PFMU_TAG 16
1214+#define BF_CAL_PHASE 21
1215+ u8 format_id;
1216+
developerf64861f2022-06-22 11:44:53 +08001217+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
developer4c6b6002022-05-30 16:36:44 +08001218+ format_id = *(u8 *)skb->data;
1219+
1220+ if (format_id == BF_PFMU_TAG) {
1221+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1222+
1223+ skb_pull(skb, 8);
1224+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1225+ } else if (format_id == BF_CAL_PHASE) {
1226+ struct mt7915_tm_ibf_cal_info *cal;
1227+ struct mt7915_tm_txbf_phase *phase =
1228+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1229+
1230+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1231+ switch (cal->cal_type) {
1232+ case IBF_PHASE_CAL_NORMAL:
1233+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1234+ if (cal->group_l_m_n != GROUP_M)
1235+ break;
1236+ phase = &phase[cal->group];
1237+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1238+ phase->status = cal->status;
1239+ break;
1240+ case IBF_PHASE_CAL_VERIFY:
1241+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1242+ break;
1243+ default:
1244+ break;
1245+ }
1246+ }
1247+
1248+ wake_up(&dev->mt76.tx_wait);
1249+
1250+ return 0;
1251+}
1252+
1253+static int
1254+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1255+{
1256+ struct mt76_testmode_data *td = &phy->mt76->test;
1257+ u16 pfmu_idx = val[0];
1258+ u16 subc_id = val[1];
1259+ u16 angle11 = val[2];
1260+ u16 angle21 = val[3];
1261+ u16 angle31 = val[4];
1262+ u16 angle41 = val[5];
1263+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1264+ struct mt7915_tm_pfmu_data *pfmu_data;
1265+
1266+ if (subc_id > 63)
1267+ return -EINVAL;
1268+
1269+ if (td->tx_antenna_mask == 2) {
1270+ phi11 = (s16)(angle21 - angle11);
1271+ } else if (td->tx_antenna_mask == 3) {
1272+ phi11 = (s16)(angle31 - angle11);
1273+ phi21 = (s16)(angle31 - angle21);
1274+ } else {
1275+ phi11 = (s16)(angle41 - angle11);
1276+ phi21 = (s16)(angle41 - angle21);
1277+ phi31 = (s16)(angle41 - angle31);
1278+ }
1279+
1280+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1281+ pfmu_data = &pfmu_data[subc_id];
1282+
1283+ if (subc_id < 32)
1284+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1285+ else
1286+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1287+ pfmu_data->phi11 = cpu_to_le16(phi11);
1288+ pfmu_data->phi21 = cpu_to_le16(phi21);
1289+ pfmu_data->phi31 = cpu_to_le16(phi31);
1290+
1291+ if (subc_id == 63) {
1292+ struct mt7915_dev *dev = phy->dev;
1293+ struct {
1294+ u8 format_id;
1295+ u8 pfmu_idx;
1296+ u8 dbdc_idx;
1297+ u8 _rsv;
1298+ u8 buf[512];
1299+ } __packed req = {
1300+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1301+ .pfmu_idx = pfmu_idx,
1302+ .dbdc_idx = phy != &dev->phy,
1303+ };
1304+
1305+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1306+
1307+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1308+ &req, sizeof(req), true);
1309+ }
1310+
1311+ return 0;
1312+}
1313+
1314+static int
1315+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1316+{
1317+ struct mt7915_tm_txbf_phase *phase, *p;
1318+ struct mt7915_dev *dev = phy->dev;
1319+ u8 *eeprom = dev->mt76.eeprom.data;
1320+ u16 offset;
1321+ bool is_7976;
1322+ int i;
1323+
1324+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1325+ offset = is_7976 ? 0x60a : 0x651;
1326+
1327+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1328+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1329+ p = &phase[i];
1330+
1331+ if (!p->status)
1332+ continue;
1333+
1334+ /* copy phase cal data to eeprom */
1335+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1336+ sizeof(p->phase));
1337+ }
1338+
1339+ return 0;
1340+}
1341+
1342+static int
1343+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1344+{
1345+ struct mt76_testmode_data *td = &phy->mt76->test;
1346+ u16 *val = td->txbf_param;
1347+
1348+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1349+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1350+
1351+ switch (td->txbf_act) {
1352+ case MT76_TM_TXBF_ACT_INIT:
1353+ return mt7915_tm_txbf_init(phy, val);
1354+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1355+ mt7915_tm_update_channel(phy);
1356+ break;
1357+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1358+ return mt7915_tm_txbf_phase_comp(phy, val);
1359+ case MT76_TM_TXBF_ACT_TX_PREP:
1360+ return mt7915_tm_txbf_set_tx(phy, val);
1361+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1362+ return mt7915_tm_txbf_profile_update(phy, val, false);
1363+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1364+ return mt7915_tm_txbf_profile_update(phy, val, true);
1365+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1366+ return mt7915_tm_txbf_phase_cal(phy, val);
1367+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1368+ return mt7915_tm_txbf_profile_update_all(phy, val);
1369+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1370+ return mt7915_tm_txbf_e2p_update(phy);
1371+ default:
1372+ break;
1373+ }
1374+
1375+ return 0;
1376+}
1377+
1378 static int
developerf64861f2022-06-22 11:44:53 +08001379 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer4c6b6002022-05-30 16:36:44 +08001380- u16 cw_max, u16 txop)
1381+ u16 cw_max, u16 txop, u8 tx_cmd)
1382 {
developerf64861f2022-06-22 11:44:53 +08001383 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer4c6b6002022-05-30 16:36:44 +08001384- struct mt7915_mcu_tx req = { .total = 1 };
1385+ struct mt7915_mcu_tx req = {
1386+ .valid = true,
1387+ .mode = tx_cmd,
1388+ .total = 1,
1389+ };
1390 struct edca *e = &req.edca[0];
1391
developerf64861f2022-06-22 11:44:53 +08001392 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
1393@@ -262,7 +1036,8 @@ done:
developer4c6b6002022-05-30 16:36:44 +08001394
developerf64861f2022-06-22 11:44:53 +08001395 return mt7915_tm_set_wmm_qid(phy,
developer4c6b6002022-05-30 16:36:44 +08001396 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1397- aifsn, cw, cw, 0);
1398+ aifsn, cw, cw, 0,
1399+ mode == MT76_TM_TX_MODE_HE_MU);
1400 }
1401
1402 static int
developerf64861f2022-06-22 11:44:53 +08001403@@ -338,7 +1113,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
developer4c6b6002022-05-30 16:36:44 +08001404 bitrate = cfg80211_calculate_bitrate(&rate);
1405 tx_len = bitrate * tx_time / 10 / 8;
1406
1407- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1408+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1409 if (ret)
1410 return ret;
1411
developerd59e4772022-07-14 13:48:49 +08001412@@ -456,64 +1231,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001413
1414 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1415
1416- if (!en)
1417+ if (!en) {
1418 mt7915_tm_set_tam_arb(phy, en, 0);
1419+
1420+ phy->mt76->test.aid = 0;
1421+ phy->mt76->test.tx_mpdu_len = 0;
1422+ phy->test.bf_en = 0;
1423+ mt7915_tm_set_entry(phy);
1424+ }
1425+}
1426+
1427+static bool
1428+mt7915_tm_check_skb(struct mt7915_phy *phy)
1429+{
1430+ struct mt76_testmode_entry_data *ed;
1431+ struct mt76_wcid *wcid;
1432+
1433+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1434+ struct ieee80211_tx_info *info;
1435+
1436+ if (!ed->tx_skb)
1437+ return false;
1438+
1439+ info = IEEE80211_SKB_CB(ed->tx_skb);
1440+ info->control.vif = phy->monitor_vif;
1441+ }
1442+
1443+ return true;
1444+}
1445+
1446+static int
1447+mt7915_tm_set_ba(struct mt7915_phy *phy)
1448+{
1449+ struct mt7915_dev *dev = phy->dev;
1450+ struct mt76_testmode_data *td = &phy->mt76->test;
1451+ struct mt76_wcid *wcid;
1452+ struct ieee80211_vif *vif = phy->monitor_vif;
1453+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1454+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1455+
1456+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1457+ int tid, ret;
1458+
1459+ params.sta = wcid_to_sta(wcid);
1460+ for (tid = 0; tid < 8; tid++) {
1461+ params.tid = tid;
1462+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1463+ if (ret)
1464+ return ret;
1465+ }
1466+ }
1467+
1468+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1469+ 0x01010101);
1470+
1471+ return 0;
1472+}
1473+
1474+static int
1475+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1476+{
1477+/* #define MURU_SET_MANUAL_CFG 100 */
1478+ struct mt7915_dev *dev = phy->dev;
1479+ struct {
1480+ __le32 cmd;
1481+ struct mt7915_tm_muru muru;
1482+ } __packed req = {
1483+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1484+ };
1485+
1486+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1487+
1488+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1489+ sizeof(req), false);
1490+}
1491+
1492+static int
1493+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1494+{
1495+ struct mt76_testmode_data *td = &phy->mt76->test;
1496+ struct mt76_testmode_entry_data *ed;
1497+ struct mt76_wcid *wcid;
1498+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1499+ struct ieee80211_vif *vif = phy->monitor_vif;
1500+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1501+ struct mt7915_tm_muru muru = {};
1502+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1503+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1504+ int i;
1505+
1506+ comm->ppdu_format = MURU_PPDU_HE_MU;
1507+ comm->band = mvif->mt76.band_idx;
1508+ comm->wmm_idx = mvif->mt76.wmm_idx;
1509+ comm->spe_idx = phy->test.spe_idx;
1510+
1511+ dl->bw = mt7915_tm_chan_bw(chandef->width);
1512+ dl->gi = td->tx_rate_sgi;
1513+ dl->ltf = td->tx_ltf;
1514+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1515+
1516+ for (i = 0; i < sizeof(dl->ru); i++)
1517+ dl->ru[i] = 0x71;
1518+
1519+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1520+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1521+
1522+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1523+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1524+ dl_usr->ru_idx = ed->ru_idx;
1525+ dl_usr->mcs = ed->tx_rate_idx;
1526+ dl_usr->nss = ed->tx_rate_nss - 1;
1527+ dl_usr->ldpc = ed->tx_rate_ldpc;
1528+ dl->ru[dl->user_num] = ed->ru_alloc;
1529+
1530+ dl->user_num++;
1531+ }
1532+
1533+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1534+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1535+
1536+ return mt7915_tm_set_muru_cfg(phy, &muru);
1537+}
1538+
1539+static int
1540+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1541+{
1542+#define MURU_SET_TX_PKT_CNT 105
1543+#define MURU_SET_TX_EN 106
1544+ struct mt7915_dev *dev = phy->dev;
1545+ struct {
1546+ __le32 cmd;
1547+ u8 band;
1548+ u8 enable;
1549+ u8 _rsv[2];
1550+ __le32 tx_count;
1551+ } __packed req = {
1552+ .band = phy != &dev->phy,
1553+ .enable = enable,
1554+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1555+ };
1556+ int ret;
1557+
1558+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1559+ cpu_to_le32(MURU_SET_TX_EN);
1560+
1561+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1562+ sizeof(req), false);
1563+ if (ret)
1564+ return ret;
1565+
1566+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1567+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1568+
1569+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1570+ sizeof(req), false);
1571 }
1572
1573 static void
1574-mt7915_tm_update_channel(struct mt7915_phy *phy)
1575+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1576 {
1577- mutex_unlock(&phy->dev->mt76.mutex);
1578- mt7915_set_channel(phy);
1579- mutex_lock(&phy->dev->mt76.mutex);
1580+ struct mt76_testmode_data *td = &phy->mt76->test;
1581
1582- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1583+ if (enable) {
1584+ struct mt7915_dev *dev = phy->dev;
1585+
1586+ mt7915_tm_set_ba(phy);
1587+ mt7915_tm_set_muru_dl(phy);
1588+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1589+ } else {
1590+ /* set to zero for counting real tx free num */
1591+ td->tx_done = 0;
1592+ }
1593+
1594+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1595+ usleep_range(100000, 200000);
1596 }
1597
1598 static void
developerd59e4772022-07-14 13:48:49 +08001599 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1600 {
developer4c6b6002022-05-30 16:36:44 +08001601 struct mt76_testmode_data *td = &phy->mt76->test;
1602- struct mt7915_dev *dev = phy->dev;
1603- struct ieee80211_tx_info *info;
1604- u8 duty_cycle = td->tx_duty_cycle;
1605- u32 tx_time = td->tx_time;
1606- u32 ipg = td->tx_ipg;
1607
1608 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1609- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1610+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1611
1612 if (en) {
1613- mt7915_tm_update_channel(phy);
1614+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1615+ u8 duty_cycle = td->tx_duty_cycle;
1616+
1617+ if (!phy->test.bf_en)
1618+ mt7915_tm_update_channel(phy);
1619
developerd59e4772022-07-14 13:48:49 +08001620 if (td->tx_spe_idx)
developer4c6b6002022-05-30 16:36:44 +08001621 phy->test.spe_idx = td->tx_spe_idx;
developerd59e4772022-07-14 13:48:49 +08001622 else
1623 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
developer4c6b6002022-05-30 16:36:44 +08001624- }
1625
1626- mt7915_tm_set_tam_arb(phy, en,
1627- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1628+ /* if all three params are set, duty_cycle will be ignored */
1629+ if (duty_cycle && tx_time && !ipg) {
1630+ ipg = tx_time * 100 / duty_cycle - tx_time;
1631+ } else if (duty_cycle && !tx_time && ipg) {
1632+ if (duty_cycle < 100)
1633+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1634+ }
1635
1636- /* if all three params are set, duty_cycle will be ignored */
1637- if (duty_cycle && tx_time && !ipg) {
1638- ipg = tx_time * 100 / duty_cycle - tx_time;
1639- } else if (duty_cycle && !tx_time && ipg) {
1640- if (duty_cycle < 100)
1641- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1642- }
1643+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1644+ mt7915_tm_set_tx_len(phy, tx_time);
1645
1646- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1647- mt7915_tm_set_tx_len(phy, tx_time);
1648+ if (ipg)
1649+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1650
1651- if (ipg)
1652- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1653+ if (!mt7915_tm_check_skb(phy))
1654+ return;
1655+ } else {
1656+ mt7915_tm_clean_hwq(phy);
1657+ }
1658
1659- if (!en || !td->tx_skb)
1660- return;
1661+ mt7915_tm_set_tam_arb(phy, en,
1662+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1663
1664- info = IEEE80211_SKB_CB(td->tx_skb);
1665- info->control.vif = phy->monitor_vif;
1666+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1667+ mt7915_tm_tx_frames_mu(phy, en);
1668
1669 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1670 }
developerd59e4772022-07-14 13:48:49 +08001671@@ -542,10 +1480,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001672 return ret;
1673
1674 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1675- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1676- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1677- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1678- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1679
1680 if (!clear) {
1681 enum mt76_rxq_id q = req.band ? MT_RXQ_EXT : MT_RXQ_MAIN;
developerd59e4772022-07-14 13:48:49 +08001682@@ -560,13 +1494,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer4c6b6002022-05-30 16:36:44 +08001683 return 0;
1684 }
1685
1686+static int
1687+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1688+{
1689+ struct mt7915_dev *dev = phy->dev;
1690+ struct mt76_wcid *wcid = NULL;
1691+ struct mt76_testmode_entry_data *ed;
1692+ struct {
1693+ u8 band;
1694+ u8 _rsv;
1695+ __le16 wlan_idx;
1696+ } __packed req = {
1697+ .band = phy->band_idx,
1698+ };
1699+
1700+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1701+ if (ed->aid == aid)
1702+ break;
1703+
1704+ if (!wcid)
1705+ return -EINVAL;
1706+
1707+ req.wlan_idx = cpu_to_le16(wcid->idx);
1708+
1709+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1710+ &req, sizeof(req), false);
1711+}
1712+
1713+static int
1714+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1715+{
1716+ struct mt7915_dev *dev = phy->dev;
1717+ struct mt7915_tm_cmd req = {
1718+ .testmode_en = 1,
1719+ .param_idx = MCU_ATE_SET_MU_RX_AID,
1720+ .param.rx_aid.band = cpu_to_le32(phy->band_idx),
1721+ .param.rx_aid.aid = cpu_to_le16(aid),
1722+ };
1723+
1724+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1725+ sizeof(req), false);
1726+}
1727+
1728 static void
1729 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1730 {
1731+ struct mt76_testmode_data *td = &phy->mt76->test;
1732+
1733+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1734 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1735
1736 if (en) {
1737- mt7915_tm_update_channel(phy);
1738+ if (!phy->test.bf_en)
1739+ mt7915_tm_update_channel(phy);
1740+ if (td->aid)
1741+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1742
1743 /* read-clear */
1744 mt7915_tm_get_rx_stats(phy, true);
developerd59e4772022-07-14 13:48:49 +08001745@@ -574,9 +1556,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001746 /* clear fw count */
1747 mt7915_tm_set_phy_count(phy, 0);
1748 mt7915_tm_set_phy_count(phy, 1);
1749-
1750- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1751 }
1752+
1753+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1754+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1755+
1756+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1757 }
1758
1759 static int
developerd59e4772022-07-14 13:48:49 +08001760@@ -613,35 +1598,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer4c6b6002022-05-30 16:36:44 +08001761 tx_cont->center_ch = freq1;
1762 tx_cont->tx_ant = td->tx_antenna_mask;
1763 tx_cont->band = phy != &dev->phy;
1764-
1765- switch (chandef->width) {
1766- case NL80211_CHAN_WIDTH_40:
1767- tx_cont->bw = CMD_CBW_40MHZ;
1768- break;
1769- case NL80211_CHAN_WIDTH_80:
1770- tx_cont->bw = CMD_CBW_80MHZ;
1771- break;
1772- case NL80211_CHAN_WIDTH_80P80:
1773- tx_cont->bw = CMD_CBW_8080MHZ;
1774- break;
1775- case NL80211_CHAN_WIDTH_160:
1776- tx_cont->bw = CMD_CBW_160MHZ;
1777- break;
1778- case NL80211_CHAN_WIDTH_5:
1779- tx_cont->bw = CMD_CBW_5MHZ;
1780- break;
1781- case NL80211_CHAN_WIDTH_10:
1782- tx_cont->bw = CMD_CBW_10MHZ;
1783- break;
1784- case NL80211_CHAN_WIDTH_20:
1785- tx_cont->bw = CMD_CBW_20MHZ;
1786- break;
1787- case NL80211_CHAN_WIDTH_20_NOHT:
1788- tx_cont->bw = CMD_CBW_20MHZ;
1789- break;
1790- default:
1791- return -EINVAL;
1792- }
1793+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1794
1795 if (!en) {
1796 req.op.rf.param.func_data = cpu_to_le32(phy != &dev->phy);
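
For reference, the per-caller bandwidth switch removed above is centralized in mt7915_tm_chan_bw(), which is added elsewhere in this patch. Below is only a sketch of the mapping such a helper performs, assuming it keeps returning the CMD_CBW_* values the removed switch produced (the real definition may use the new TM_CBW_* enum instead), with a hypothetical name to avoid confusion with the actual function:

static u8 tm_chan_bw_sketch(enum nl80211_chan_width width)
{
	static const u8 width_to_bw[] = {
		[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
		[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
		[NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
		[NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
		[NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
		[NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
		[NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
		[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
	};

	/* the removed switch rejected unknown widths with -EINVAL */
	if (width >= ARRAY_SIZE(width_to_bw))
		return CMD_CBW_20MHZ;

	return width_to_bw[width];
}
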
developerd59e4772022-07-14 13:48:49 +08001797@@ -725,6 +1682,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer4c6b6002022-05-30 16:36:44 +08001798 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1799 if (changed & BIT(TM_CHANGED_TXPOWER))
1800 mt7915_tm_set_tx_power(phy);
1801+ if (changed & BIT(TM_CHANGED_AID))
1802+ mt7915_tm_set_entry(phy);
1803+ if (changed & BIT(TM_CHANGED_CFG))
1804+ mt7915_tm_set_cfg(phy);
1805+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1806+ mt7915_tm_set_txbf(phy);
1807 }
1808
1809 static int
developerd59e4772022-07-14 13:48:49 +08001810@@ -798,6 +1761,7 @@ static int
developer4c6b6002022-05-30 16:36:44 +08001811 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1812 {
1813 struct mt7915_phy *phy = mphy->priv;
1814+ struct mt7915_dev *dev = phy->dev;
1815 void *rx, *rssi;
1816 int i;
1817
developerd59e4772022-07-14 13:48:49 +08001818@@ -843,11 +1807,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer4c6b6002022-05-30 16:36:44 +08001819
1820 nla_nest_end(msg, rx);
1821
1822+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1823+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1824+
1825 return mt7915_tm_get_rx_stats(phy, false);
1826 }
1827
1828+static int
1829+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1830+{
1831+ struct mt7915_mcu_eeprom_info req = {};
1832+ u8 *eeprom = dev->mt76.eeprom.data;
1833+ int i, ret = -EINVAL;
1834+
1835+	/* prevent damaging the chip id in the efuse */
1836+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1837+ goto out;
1838+
1839+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1840+ req.addr = cpu_to_le32(i);
1841+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1842+
1843+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1844+ &req, sizeof(req), true);
1845+ if (ret)
1846+ return ret;
1847+ }
1848+
1849+out:
1850+ return ret;
1851+}
1852+
1853+static int
1854+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1855+{
1856+ struct mt7915_phy *phy = mphy->priv;
1857+ struct mt7915_dev *dev = phy->dev;
1858+ u8 *eeprom = dev->mt76.eeprom.data;
1859+ int ret = 0;
1860+
1861+ if (offset >= mt7915_eeprom_size(dev))
1862+ return -EINVAL;
1863+
1864+ switch (action) {
1865+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1866+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1867+ break;
1868+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1869+ ret = mt7915_mcu_set_eeprom(dev, true);
1870+ break;
1871+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1872+ ret = mt7915_tm_write_back_to_efuse(dev);
1873+ break;
1874+ default:
1875+ break;
1876+ }
1877+
1878+ return ret;
1879+}
1880+
1881 const struct mt76_testmode_ops mt7915_testmode_ops = {
1882 .set_state = mt7915_tm_set_state,
1883 .set_params = mt7915_tm_set_params,
1884 .dump_stats = mt7915_tm_dump_stats,
1885+ .set_eeprom = mt7915_tm_set_eeprom,
1886 };
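
The new set_eeprom hook is driven one action per netlink command from the common code (see mt76_testmode_set_eeprom() further below). A hypothetical kernel-side helper sketching the intended two-step use; the helper name and error handling are mine, only the op calls are from the patch:

static int tm_update_eeprom_block(struct mt76_phy *mphy, u32 offset,
				  u8 val[MT76_TM_EEPROM_BLOCK_SIZE])
{
	const struct mt76_testmode_ops *ops = mphy->dev->test_ops;
	int ret;

	/* offsets must be aligned to the 16-byte eeprom block size */
	if (offset % MT76_TM_EEPROM_BLOCK_SIZE)
		return -EINVAL;

	/* 1. patch one block of the cached eeprom image */
	ret = ops->set_eeprom(mphy, offset, val, MT76_TM_EEPROM_ACTION_UPDATE_DATA);
	if (ret)
		return ret;

	/* 2. push the updated image to firmware via buffer mode */
	return ops->set_eeprom(mphy, 0, val, MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE);
}

MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE would follow the same pattern when the change is meant to be persisted to efuse.
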
1887diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developerf1b69ea2022-07-04 10:54:39 +08001888index a1c54c89..01b08e9e 100644
developer4c6b6002022-05-30 16:36:44 +08001889--- a/mt7915/testmode.h
1890+++ b/mt7915/testmode.h
1891@@ -4,6 +4,8 @@
1892 #ifndef __MT7915_TESTMODE_H
1893 #define __MT7915_TESTMODE_H
1894
1895+#include "mcu.h"
1896+
1897 struct mt7915_tm_trx {
1898 u8 type;
1899 u8 enable;
1900@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1901 u8 _rsv[2];
1902 };
1903
1904+struct mt7915_tm_mu_rx_aid {
1905+ __le32 band;
1906+ __le16 aid;
1907+};
1908+
1909 struct mt7915_tm_cmd {
1910 u8 testmode_en;
1911 u8 param_idx;
1912@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1913 struct mt7915_tm_slot_time slot;
1914 struct mt7915_tm_clean_txq clean;
1915 struct mt7915_tm_cfg cfg;
1916+ struct mt7915_tm_mu_rx_aid rx_aid;
1917 u8 test[72];
1918 } param;
1919 } __packed;
1920@@ -109,6 +117,16 @@ enum {
1921 TAM_ARB_OP_MODE_FORCE_SU = 5,
1922 };
1923
1924+enum {
1925+ TM_CBW_20MHZ,
1926+ TM_CBW_40MHZ,
1927+ TM_CBW_80MHZ,
1928+ TM_CBW_10MHZ,
1929+ TM_CBW_5MHZ,
1930+ TM_CBW_160MHZ,
1931+ TM_CBW_8080MHZ,
1932+};
1933+
1934 struct mt7915_tm_rx_stat_band {
1935 u8 category;
1936
1937@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1938 __le16 mdrdy_cnt_ofdm;
1939 };
1940
1941+struct mt7915_tm_muru_comm {
1942+ u8 ppdu_format;
1943+ u8 sch_type;
1944+ u8 band;
1945+ u8 wmm_idx;
1946+ u8 spe_idx;
1947+ u8 proc_type;
1948+};
1949+
1950+struct mt7915_tm_muru_dl_usr {
1951+ __le16 wlan_idx;
1952+ u8 ru_alloc_seg;
1953+ u8 ru_idx;
1954+ u8 ldpc;
1955+ u8 nss;
1956+ u8 mcs;
1957+ u8 mu_group_idx;
1958+ u8 vht_groud_id;
1959+ u8 vht_up;
1960+ u8 he_start_stream;
1961+ u8 he_mu_spatial;
1962+ u8 ack_policy;
1963+ __le16 tx_power_alpha;
1964+};
1965+
1966+struct mt7915_tm_muru_dl {
1967+ u8 user_num;
1968+ u8 tx_mode;
1969+ u8 bw;
1970+ u8 gi;
1971+ u8 ltf;
1972+ /* sigB */
1973+ u8 mcs;
1974+ u8 dcm;
1975+ u8 cmprs;
1976+
1977+ u8 tx_power;
1978+ u8 ru[8];
1979+ u8 c26[2];
1980+ u8 ack_policy;
1981+
1982+ struct mt7915_tm_muru_dl_usr usr[16];
1983+};
1984+
1985+struct mt7915_tm_muru_ul_usr {
1986+ __le16 wlan_idx;
1987+ u8 ru_alloc;
1988+ u8 ru_idx;
1989+ u8 ldpc;
1990+ u8 nss;
1991+ u8 mcs;
1992+ u8 target_rssi;
1993+ __le32 trig_pkt_size;
1994+};
1995+
1996+struct mt7915_tm_muru_ul {
1997+ u8 user_num;
1998+
1999+ /* UL TX */
2000+ u8 trig_type;
2001+ __le16 trig_cnt;
2002+ __le16 trig_intv;
2003+ u8 bw;
2004+ u8 gi_ltf;
2005+ __le16 ul_len;
2006+ u8 pad;
2007+ u8 trig_ta[ETH_ALEN];
2008+ u8 ru[8];
2009+ u8 c26[2];
2010+
2011+ struct mt7915_tm_muru_ul_usr usr[16];
2012+ /* HE TB RX Debug */
2013+ __le32 rx_hetb_nonsf_en_bitmap;
2014+ __le32 rx_hetb_cfg[2];
2015+
2016+ /* DL TX */
2017+ u8 ba_type;
2018+};
2019+
2020+struct mt7915_tm_muru {
2021+ __le32 cfg_comm;
2022+ __le32 cfg_dl;
2023+ __le32 cfg_ul;
2024+
2025+ struct mt7915_tm_muru_comm comm;
2026+ struct mt7915_tm_muru_dl dl;
2027+ struct mt7915_tm_muru_ul ul;
2028+};
2029+
2030+#define MURU_PPDU_HE_MU BIT(3)
2031+
2032+/* Common Config */
2033+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2034+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2035+/* #define MURU_COMM_BAND BIT(2) */
2036+/* #define MURU_COMM_WMM BIT(3) */
2037+/* #define MURU_COMM_SPE_IDX BIT(4) */
2038+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2039+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2040+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2041+/* DL Config */
2042+#define MURU_DL_BW BIT(0)
2043+#define MURU_DL_GI BIT(1)
2044+#define MURU_DL_TX_MODE BIT(2)
2045+#define MURU_DL_TONE_PLAN BIT(3)
2046+#define MURU_DL_USER_CNT BIT(4)
2047+#define MURU_DL_LTF BIT(5)
2048+#define MURU_DL_SIGB_MCS BIT(6)
2049+#define MURU_DL_SIGB_DCM BIT(7)
2050+#define MURU_DL_SIGB_CMPRS BIT(8)
2051+#define MURU_DL_ACK_POLICY BIT(9)
2052+#define MURU_DL_TXPOWER BIT(10)
2053+/* DL Per User Config */
2054+#define MURU_DL_USER_WLAN_ID BIT(16)
2055+#define MURU_DL_USER_COD BIT(17)
2056+#define MURU_DL_USER_MCS BIT(18)
2057+#define MURU_DL_USER_NSS BIT(19)
2058+#define MURU_DL_USER_RU_ALLOC BIT(20)
2059+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2060+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2061+#define MURU_DL_USER_ACK_POLICY BIT(23)
2062+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2063+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2064+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2065+
2066+#define MAX_PHASE_GROUP_NUM 9
2067+
2068+struct mt7915_tm_txbf_phase {
2069+ u8 status;
2070+ struct {
2071+ u8 r0_uh;
2072+ u8 r0_h;
2073+ u8 r0_m;
2074+ u8 r0_l;
2075+ u8 r0_ul;
2076+ u8 r1_uh;
2077+ u8 r1_h;
2078+ u8 r1_m;
2079+ u8 r1_l;
2080+ u8 r1_ul;
2081+ u8 r2_uh;
2082+ u8 r2_h;
2083+ u8 r2_m;
2084+ u8 r2_l;
2085+ u8 r2_ul;
2086+ u8 r3_uh;
2087+ u8 r3_h;
2088+ u8 r3_m;
2089+ u8 r3_l;
2090+ u8 r3_ul;
2091+ u8 r2_uh_sx2;
2092+ u8 r2_h_sx2;
2093+ u8 r2_m_sx2;
2094+ u8 r2_l_sx2;
2095+ u8 r2_ul_sx2;
2096+ u8 r3_uh_sx2;
2097+ u8 r3_h_sx2;
2098+ u8 r3_m_sx2;
2099+ u8 r3_l_sx2;
2100+ u8 r3_ul_sx2;
2101+ u8 m_t0_h;
2102+ u8 m_t1_h;
2103+ u8 m_t2_h;
2104+ u8 m_t2_h_sx2;
2105+ u8 r0_reserved;
2106+ u8 r1_reserved;
2107+ u8 r2_reserved;
2108+ u8 r3_reserved;
2109+ u8 r2_sx2_reserved;
2110+ u8 r3_sx2_reserved;
2111+ } phase;
2112+};
2113+
2114+struct mt7915_tm_pfmu_tag1 {
2115+ __le32 pfmu_idx:10;
2116+ __le32 ebf:1;
2117+ __le32 data_bw:2;
2118+ __le32 lm:2;
2119+ __le32 is_mu:1;
2120+ __le32 nr:3, nc:3;
2121+ __le32 codebook:2;
2122+ __le32 ngroup:2;
2123+ __le32 _rsv:2;
2124+ __le32 invalid_prof:1;
2125+ __le32 rmsd:3;
2126+
2127+ __le32 col_id1:6, row_id1:10;
2128+ __le32 col_id2:6, row_id2:10;
2129+ __le32 col_id3:6, row_id3:10;
2130+ __le32 col_id4:6, row_id4:10;
2131+
2132+ __le32 ru_start_id:7;
2133+ __le32 _rsv1:1;
2134+ __le32 ru_end_id:7;
2135+ __le32 _rsv2:1;
2136+ __le32 mob_cal_en:1;
2137+ __le32 _rsv3:15;
2138+
2139+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2140+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2141+
2142+ __le32 _rsv4;
2143+} __packed;
2144+
2145+struct mt7915_tm_pfmu_tag2 {
2146+ __le32 smart_ant:24;
2147+ __le32 se_idx:5;
2148+ __le32 _rsv:3;
2149+
2150+ __le32 _rsv1:8;
2151+ __le32 rmsd_thres:3;
2152+ __le32 _rsv2:5;
2153+ __le32 ibf_timeout:8;
2154+ __le32 _rsv3:8;
2155+
2156+ __le32 _rsv4:16;
2157+ __le32 ibf_data_bw:2;
2158+ __le32 ibf_nc:3;
2159+ __le32 ibf_nr:3;
2160+ __le32 ibf_ru:8;
2161+
2162+ __le32 mob_delta_t:8;
2163+ __le32 mob_lq_result:7;
2164+ __le32 _rsv5:1;
2165+ __le32 _rsv6:16;
2166+
2167+ __le32 _rsv7;
2168+} __packed;
2169+
2170+struct mt7915_tm_pfmu_tag {
2171+ struct mt7915_tm_pfmu_tag1 t1;
2172+ struct mt7915_tm_pfmu_tag2 t2;
2173+};
2174+
2175+struct mt7915_tm_pfmu_data {
2176+ __le16 subc_idx;
2177+ __le16 phi11;
2178+ __le16 phi21;
2179+ __le16 phi31;
2180+};
2181+
2182+struct mt7915_tm_ibf_cal_info {
2183+ u8 format_id;
2184+ u8 group_l_m_n;
2185+ u8 group;
2186+ bool sx2;
2187+ u8 status;
2188+ u8 cal_type;
2189+ u8 _rsv[2];
2190+ u8 buf[1000];
2191+} __packed;
2192+
2193+enum {
2194+ IBF_PHASE_CAL_UNSPEC,
2195+ IBF_PHASE_CAL_NORMAL,
2196+ IBF_PHASE_CAL_VERIFY,
2197+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2198+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2199+};
2200+
2201 #endif
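
The MURU_DL_* defines appear to act as a field-valid bitmask written to cfg_dl, telling firmware which members of mt7915_tm_muru_dl to apply; MURU_DL_SET covers the common DL fields (GENMASK(7, 0)), the basic per-user fields (GENMASK(20, 16)) and the per-user power alpha (BIT(25)). A rough, unvalidated sketch of filling the structure under that assumption, with placeholder values only:

static void tm_muru_dl_sketch(struct mt7915_tm_muru *muru)
{
	struct mt7915_tm_muru_dl *dl = &muru->dl;

	muru->cfg_dl = cpu_to_le32(MURU_DL_SET);

	dl->user_num = 2;
	dl->bw = TM_CBW_80MHZ;			/* placeholder encoding */
	dl->usr[0].wlan_idx = cpu_to_le16(1);
	dl->usr[0].ru_idx = 61;			/* placeholder RU */
	dl->usr[1].wlan_idx = cpu_to_le16(2);
	dl->usr[1].ru_idx = 62;
}
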
2202diff --git a/testmode.c b/testmode.c
developere9954402022-07-12 10:15:11 -07002203index 31439b39..86323f64 100644
developer4c6b6002022-05-30 16:36:44 +08002204--- a/testmode.c
2205+++ b/testmode.c
developere9954402022-07-12 10:15:11 -07002206@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer4c6b6002022-05-30 16:36:44 +08002207 };
2208 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2209
2210-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2211+static void
2212+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf1b69ea2022-07-04 10:54:39 +08002213+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2214+ u16 limit)
developer4c6b6002022-05-30 16:36:44 +08002215 {
2216 struct mt76_testmode_data *td = &phy->test;
2217 struct mt76_dev *dev = phy->dev;
2218- struct mt76_wcid *wcid = &dev->global_wcid;
2219- struct sk_buff *skb = td->tx_skb;
2220- struct mt76_queue *q;
2221- u16 tx_queued_limit;
2222- int qid;
2223-
2224- if (!skb || !td->tx_pending)
2225- return;
2226+ u16 count = limit;
2227
2228- qid = skb_get_queue_mapping(skb);
2229- q = phy->q_tx[qid];
2230-
2231- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2232-
2233- spin_lock_bh(&q->lock);
2234-
2235- while (td->tx_pending > 0 &&
2236- td->tx_queued - td->tx_done < tx_queued_limit &&
2237+ while (td->tx_pending > 0 && count &&
2238 q->queued < q->ndesc / 2) {
2239 int ret;
2240
developere9954402022-07-12 10:15:11 -07002241@@ -57,13 +45,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002242 if (ret < 0)
2243 break;
2244
2245+ count--;
2246 td->tx_pending--;
2247 td->tx_queued++;
2248+
2249+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2250+ if (td->tx_queued - td->tx_done >= limit)
2251+ break;
2252 }
2253
2254 dev->queue_ops->kick(dev, q);
2255+}
2256+
2257+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2258+{
2259+ struct mt76_testmode_data *td = &phy->test;
2260+ struct mt76_testmode_entry_data *ed;
2261+ struct mt76_queue *q;
2262+ int qid;
2263+ u16 tx_queued_limit;
2264+ u32 remain;
2265+ bool is_mu;
2266+
2267+ if (!td->tx_pending)
2268+ return;
2269+
2270+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2271+ tx_queued_limit = 100;
2272+
2273+ if (!td->aid) {
2274+ qid = skb_get_queue_mapping(td->tx_skb);
2275+ q = phy->q_tx[qid];
2276+ spin_lock_bh(&q->lock);
2277+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developerf1b69ea2022-07-04 10:54:39 +08002278+ td->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002279+ spin_unlock_bh(&q->lock);
2280+
2281+ return;
2282+ }
2283+
2284+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2285+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2286+ qid = skb_get_queue_mapping(ed->tx_skb);
2287+ q = phy->q_tx[qid];
2288+
2289+ spin_lock_bh(&q->lock);
2290+
2291+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2292+ if (remain < tx_queued_limit)
2293+ tx_queued_limit = remain;
2294+
developerf1b69ea2022-07-04 10:54:39 +08002295+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer4c6b6002022-05-30 16:36:44 +08002296+
2297+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2298+ td->cur_entry = list_next_entry(td->cur_entry, list);
2299
2300 spin_unlock_bh(&q->lock);
2301+
2302+ if (is_mu && td->tx_pending)
2303+ mt76_worker_schedule(&phy->dev->tx_worker);
2304 }
2305
2306 static u32
developere9954402022-07-12 10:15:11 -07002307@@ -89,15 +129,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer4c6b6002022-05-30 16:36:44 +08002308 }
2309
2310 static void
2311-mt76_testmode_free_skb(struct mt76_phy *phy)
2312+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2313+{
2314+ if (!(*tx_skb))
2315+ return;
2316+
2317+ dev_kfree_skb(*tx_skb);
2318+ *tx_skb = NULL;
2319+}
2320+
2321+static void
2322+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2323 {
2324 struct mt76_testmode_data *td = &phy->test;
2325+ struct mt76_testmode_entry_data *ed = &td->ed;
2326+ struct mt76_wcid *wcid;
2327+
2328+ mt76_testmode_free_skb(&ed->tx_skb);
2329
2330- dev_kfree_skb(td->tx_skb);
2331- td->tx_skb = NULL;
2332+ mt76_tm_for_each_entry(phy, wcid, ed)
2333+ mt76_testmode_free_skb(&ed->tx_skb);
2334 }
2335
2336-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2337+static int
2338+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2339+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2340 {
2341 #define MT_TXP_MAX_LEN 4095
2342 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developere9954402022-07-12 10:15:11 -07002343@@ -119,7 +175,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002344 nfrags = len / MT_TXP_MAX_LEN;
2345 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2346
2347- if (len > IEEE80211_MAX_FRAME_LEN)
2348+ if (len > IEEE80211_MAX_FRAME_LEN ||
2349+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2350 fc |= IEEE80211_STYPE_QOS_DATA;
2351
2352 head = alloc_skb(head_len, GFP_KERNEL);
developere9954402022-07-12 10:15:11 -07002353@@ -128,9 +185,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002354
developere9954402022-07-12 10:15:11 -07002355 hdr = __skb_put_zero(head, sizeof(*hdr));
developer4c6b6002022-05-30 16:36:44 +08002356 hdr->frame_control = cpu_to_le16(fc);
2357- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2358- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2359- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2360+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2361+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2362+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2363 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developere9954402022-07-12 10:15:11 -07002364 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2365 head_len - sizeof(*hdr));
2366@@ -156,7 +213,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002367
2368 frag = alloc_skb(frag_len, GFP_KERNEL);
2369 if (!frag) {
2370- mt76_testmode_free_skb(phy);
2371+ mt76_testmode_free_skb(tx_skb);
2372 dev_kfree_skb(head);
2373 return -ENOMEM;
2374 }
developere9954402022-07-12 10:15:11 -07002375@@ -169,15 +226,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer4c6b6002022-05-30 16:36:44 +08002376 frag_tail = &(*frag_tail)->next;
2377 }
2378
2379- mt76_testmode_free_skb(phy);
2380- td->tx_skb = head;
2381+ mt76_testmode_free_skb(tx_skb);
2382+ *tx_skb = head;
2383
2384 return 0;
2385 }
2386-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2387
2388-static int
2389-mt76_testmode_tx_init(struct mt76_phy *phy)
2390+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2391+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2392 {
2393 struct mt76_testmode_data *td = &phy->test;
2394 struct ieee80211_tx_info *info;
developere9954402022-07-12 10:15:11 -07002395@@ -185,7 +241,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002396 u8 max_nss = hweight8(phy->antenna_mask);
2397 int ret;
2398
2399- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2400+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2401 if (ret)
2402 return ret;
2403
developere9954402022-07-12 10:15:11 -07002404@@ -195,7 +251,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002405 if (td->tx_antenna_mask)
2406 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2407
2408- info = IEEE80211_SKB_CB(td->tx_skb);
2409+ info = IEEE80211_SKB_CB(*tx_skb);
2410 rate = &info->control.rates[0];
2411 rate->count = 1;
2412 rate->idx = td->tx_rate_idx;
developere9954402022-07-12 10:15:11 -07002413@@ -267,6 +323,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002414 out:
2415 return 0;
2416 }
2417+EXPORT_SYMBOL(mt76_testmode_init_skb);
2418+
2419+static int
2420+mt76_testmode_tx_init(struct mt76_phy *phy)
2421+{
2422+ struct mt76_testmode_entry_data *ed;
2423+ struct mt76_wcid *wcid;
2424+
2425+ mt76_tm_for_each_entry(phy, wcid, ed) {
2426+ int ret;
2427+
2428+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2429+ &ed->tx_skb, ed->addr);
2430+ if (ret)
2431+ return ret;
2432+ }
2433+
2434+ return 0;
2435+}
2436
2437 static void
2438 mt76_testmode_tx_start(struct mt76_phy *phy)
developere9954402022-07-12 10:15:11 -07002439@@ -277,6 +352,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002440 td->tx_queued = 0;
2441 td->tx_done = 0;
2442 td->tx_pending = td->tx_count;
2443+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2444+ td->tx_pending = 1;
2445+ if (td->entry_num) {
2446+ td->tx_pending *= td->entry_num;
2447+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2448+ struct mt76_wcid, list);
2449+ }
2450+
2451 mt76_worker_schedule(&dev->tx_worker);
2452 }
2453
developere9954402022-07-12 10:15:11 -07002454@@ -295,7 +378,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002455 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2456 MT76_TM_TIMEOUT * HZ);
2457
2458- mt76_testmode_free_skb(phy);
2459+ mt76_testmode_free_skb_all(phy);
2460 }
2461
2462 static inline void
developere9954402022-07-12 10:15:11 -07002463@@ -326,6 +409,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer4c6b6002022-05-30 16:36:44 +08002464 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2465 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2466 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2467+
2468+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2469 }
2470
2471 static int
developere9954402022-07-12 10:15:11 -07002472@@ -335,8 +420,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer4c6b6002022-05-30 16:36:44 +08002473 struct mt76_dev *dev = phy->dev;
2474 int err;
2475
2476- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2477+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2478+		/* for MU, clean up hw queues first to get the tx free done event */
2479+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2480+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2481 mt76_testmode_tx_stop(phy);
2482+ }
2483
2484 if (state == MT76_TM_STATE_TX_FRAMES) {
2485 err = mt76_testmode_tx_init(phy);
developere9954402022-07-12 10:15:11 -07002486@@ -406,6 +495,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer4c6b6002022-05-30 16:36:44 +08002487 return 0;
2488 }
2489
2490+static int
2491+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2492+{
2493+ struct mt76_dev *dev = phy->dev;
2494+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2495+ u32 offset = 0;
2496+ int err = -EINVAL;
2497+
2498+ if (!dev->test_ops->set_eeprom)
2499+ return -EOPNOTSUPP;
2500+
2501+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2502+ 0, MT76_TM_EEPROM_ACTION_MAX))
2503+ goto out;
2504+
2505+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2506+ struct nlattr *cur;
2507+ int rem, idx = 0;
2508+
2509+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2510+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2511+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2512+ goto out;
2513+
2514+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2515+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2516+ goto out;
2517+
2518+ val[idx++] = nla_get_u8(cur);
2519+ }
2520+ }
2521+
2522+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2523+
2524+out:
2525+ return err;
2526+}
2527+
2528 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2529 void *data, int len)
2530 {
developere9954402022-07-12 10:15:11 -07002531@@ -429,6 +556,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002532
2533 mutex_lock(&dev->mutex);
2534
2535+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2536+ err = mt76_testmode_set_eeprom(phy, tb);
2537+ goto out;
2538+ }
2539+
2540 if (tb[MT76_TM_ATTR_RESET]) {
2541 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2542 memset(td, 0, sizeof(*td));
developere9954402022-07-12 10:15:11 -07002543@@ -456,7 +588,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002544 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2545 &td->tx_duty_cycle, 0, 99) ||
2546 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2547- &td->tx_power_control, 0, 1))
2548+ &td->tx_power_control, 0, 1) ||
2549+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2550+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2551+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2552 goto out;
2553
2554 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developere9954402022-07-12 10:15:11 -07002555@@ -488,8 +623,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002556
2557 if (tb[MT76_TM_ATTR_TX_POWER]) {
2558 struct nlattr *cur;
2559- int idx = 0;
2560- int rem;
2561+ int rem, idx = 0;
2562
2563 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2564 if (nla_len(cur) != 1 ||
developere9954402022-07-12 10:15:11 -07002565@@ -509,11 +643,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer4c6b6002022-05-30 16:36:44 +08002566 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2567 goto out;
2568
2569- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2570+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2571+ }
2572+ }
2573+
2574+ if (tb[MT76_TM_ATTR_CFG]) {
2575+ struct nlattr *cur;
2576+ int rem, idx = 0;
2577+
2578+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2579+ if (nla_len(cur) != 1 || idx >= 2)
2580+ goto out;
2581+
2582+ if (idx == 0)
2583+ td->cfg.type = nla_get_u8(cur);
2584+ else
2585+ td->cfg.enable = nla_get_u8(cur);
2586 idx++;
2587 }
2588 }
2589
2590+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2591+ struct nlattr *cur;
2592+ int rem, idx = 0;
2593+
2594+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2595+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2596+ 0, MT76_TM_TXBF_ACT_MAX))
2597+ goto out;
2598+
2599+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2600+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2601+ if (nla_len(cur) != 2 ||
2602+ idx >= ARRAY_SIZE(td->txbf_param))
2603+ goto out;
2604+
2605+ td->txbf_param[idx++] = nla_get_u16(cur);
2606+ }
2607+ }
2608+
2609 if (dev->test_ops->set_params) {
2610 err = dev->test_ops->set_params(phy, tb, state);
2611 if (err)
developere9954402022-07-12 10:15:11 -07002612@@ -578,6 +746,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002613 struct mt76_phy *phy = hw->priv;
2614 struct mt76_dev *dev = phy->dev;
2615 struct mt76_testmode_data *td = &phy->test;
2616+ struct mt76_testmode_entry_data *ed = &td->ed;
2617 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2618 int err = 0;
2619 void *a;
developere9954402022-07-12 10:15:11 -07002620@@ -610,6 +779,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002621 goto out;
2622 }
2623
2624+ if (tb[MT76_TM_ATTR_AID]) {
2625+ struct mt76_wcid *wcid;
2626+ u8 aid;
2627+
2628+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2629+ if (err)
2630+ goto out;
2631+
2632+ mt76_tm_for_each_entry(phy, wcid, ed)
2633+ if (ed->aid == aid)
2634+ ed = mt76_testmode_entry_data(phy, wcid);
2635+ }
2636+
2637 mt76_testmode_init_defaults(phy);
2638
2639 err = -EMSGSIZE;
developere9954402022-07-12 10:15:11 -07002640@@ -622,12 +804,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002641 goto out;
2642
2643 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2644- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2645 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2646- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2647- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2648 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2649- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2650 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2651 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2652 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developere9954402022-07-12 10:15:11 -07002653@@ -647,6 +825,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer4c6b6002022-05-30 16:36:44 +08002654 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2655 goto out;
2656
2657+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2658+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2659+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2660+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2661+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2662+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2663+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2664+ goto out;
2665+
2666 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2667 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2668 if (!a)
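
To make the new per-entry scheduling in mt76_testmode_tx_pending() concrete: with tx_count = 1000 and three entries, mt76_testmode_tx_start() arms tx_pending = 3000; remain = tx_pending % tx_count is how many frames the current entry is still owed in the current round (a full tx_count right after a round boundary), and cur_entry only advances once tx_pending lands on a multiple of tx_count. A standalone trace of that arithmetic, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int tx_count = 1000, tx_pending = 2350;
	unsigned int mod = tx_pending % tx_count;
	unsigned int remain = mod ? mod : tx_count;
	unsigned int limit = remain < 100 ? remain : 100;	/* tx_queued_limit cap */

	/* prints: remain=350 per_pass<=100 next_entry_at=2000 */
	printf("remain=%u per_pass<=%u next_entry_at=%u\n",
	       remain, limit, (tx_pending / tx_count) * tx_count);
	return 0;
}
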
2669diff --git a/testmode.h b/testmode.h
developerf1b69ea2022-07-04 10:54:39 +08002670index 89613266..57949f2b 100644
developer4c6b6002022-05-30 16:36:44 +08002671--- a/testmode.h
2672+++ b/testmode.h
2673@@ -6,6 +6,8 @@
2674 #define __MT76_TESTMODE_H
2675
2676 #define MT76_TM_TIMEOUT 10
2677+#define MT76_TM_MAX_ENTRY_NUM 16
2678+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2679
2680 /**
2681 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2682@@ -47,6 +49,15 @@
2683 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2684 *
2685 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2686+ *
2687+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2688+ * (u8, see &enum mt76_testmode_eeprom_action)
2689+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2690+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2691+ * (nested, u8 attrs)
2692+ *
2693+ * @MT76_TM_ATTR_CFG: configure a testmode rf feature (nested, see &enum mt76_testmode_cfg)
2694+ *
2695 */
2696 enum mt76_testmode_attr {
2697 MT76_TM_ATTR_UNSPEC,
2698@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2699 MT76_TM_ATTR_DRV_DATA,
2700
2701 MT76_TM_ATTR_MAC_ADDRS,
2702+ MT76_TM_ATTR_AID,
2703+ MT76_TM_ATTR_RU_ALLOC,
2704+ MT76_TM_ATTR_RU_IDX,
2705+
2706+ MT76_TM_ATTR_EEPROM_ACTION,
2707+ MT76_TM_ATTR_EEPROM_OFFSET,
2708+ MT76_TM_ATTR_EEPROM_VAL,
2709+
2710+ MT76_TM_ATTR_CFG,
2711+ MT76_TM_ATTR_TXBF_ACT,
2712+ MT76_TM_ATTR_TXBF_PARAM,
2713
2714 /* keep last */
2715 NUM_MT76_TM_ATTRS,
2716@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2717
2718 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2719
2720+/**
2721+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2722+ *
2723+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values in a specific
2724+ * eeprom data block
2725+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2726+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2727+ */
2728+enum mt76_testmode_eeprom_action {
2729+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2730+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2731+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2732+
2733+ /* keep last */
2734+ NUM_MT76_TM_EEPROM_ACTION,
2735+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2736+};
2737+
2738+/**
2739+ * enum mt76_testmode_cfg - rf feature configuration type
2740+ *
2741+ * @MT76_TM_CFG_TSSI: transmit signal strength indication
2742+ * @MT76_TM_CFG_DPD: digital pre-distortion
2743+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate power offset
2744+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
2745+ */
2746+enum mt76_testmode_cfg {
2747+ MT76_TM_CFG_TSSI,
2748+ MT76_TM_CFG_DPD,
2749+ MT76_TM_CFG_RATE_POWER_OFFSET,
2750+ MT76_TM_CFG_THERMAL_COMP,
2751+
2752+ /* keep last */
2753+ NUM_MT76_TM_CFG,
2754+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2755+};
2756+
2757+enum mt76_testmode_txbf_act {
2758+ MT76_TM_TXBF_ACT_INIT,
2759+ MT76_TM_TXBF_ACT_UPDATE_CH,
2760+ MT76_TM_TXBF_ACT_PHASE_COMP,
2761+ MT76_TM_TXBF_ACT_TX_PREP,
2762+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2763+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2764+ MT76_TM_TXBF_ACT_PHASE_CAL,
2765+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2766+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2767+
2768+ /* keep last */
2769+ NUM_MT76_TM_TXBF_ACT,
2770+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2771+};
2772+
2773 #endif
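
A hypothetical userspace sketch (libnl-3) of driving the EEPROM attributes documented above: it nests MT76_TM_ATTR_EEPROM_ACTION/OFFSET/VAL inside NL80211_ATTR_TESTDATA to patch one 16-byte block. The surrounding nl80211 socket and testmode plumbing is omitted, msg is assumed to be a prepared NL80211_CMD_TESTMODE message for the right wiphy, and the MT76_TM_* constants come from the header above:

#include <errno.h>
#include <stdint.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>
#include "testmode.h"

static int tm_put_eeprom_block(struct nl_msg *msg, uint32_t offset,
			       const uint8_t block[MT76_TM_EEPROM_BLOCK_SIZE])
{
	struct nlattr *tmdata, *vals;
	int i;

	tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	if (!tmdata)
		return -ENOMEM;

	/* offset must be a multiple of MT76_TM_EEPROM_BLOCK_SIZE (16) */
	nla_put_u8(msg, MT76_TM_ATTR_EEPROM_ACTION,
		   MT76_TM_EEPROM_ACTION_UPDATE_DATA);
	nla_put_u32(msg, MT76_TM_ATTR_EEPROM_OFFSET, offset);

	vals = nla_nest_start(msg, MT76_TM_ATTR_EEPROM_VAL);
	if (!vals)
		return -ENOMEM;
	for (i = 0; i < MT76_TM_EEPROM_BLOCK_SIZE; i++)
		nla_put_u8(msg, i, block[i]);
	nla_nest_end(msg, vals);

	nla_nest_end(msg, tmdata);
	return 0;
}
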
2774diff --git a/tools/fields.c b/tools/fields.c
developerf1b69ea2022-07-04 10:54:39 +08002775index e3f69089..6e36ab27 100644
developer4c6b6002022-05-30 16:36:44 +08002776--- a/tools/fields.c
2777+++ b/tools/fields.c
2778@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2779 [MT76_TM_STATE_IDLE] = "idle",
2780 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2781 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2782+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2783 };
2784
2785 static const char * const testmode_tx_mode[] = {
2786@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2787 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2788 }
2789
2790+static bool parse_mac(const struct tm_field *field, int idx,
2791+ struct nl_msg *msg, const char *val)
2792+{
2793+#define ETH_ALEN 6
2794+ bool ret = true;
2795+ char *str, *cur, *ap;
2796+ void *a;
2797+
2798+ ap = str = strdup(val);
2799+
2800+ a = nla_nest_start(msg, idx);
2801+
2802+ idx = 0;
2803+ while ((cur = strsep(&ap, ",")) != NULL) {
2804+ unsigned char addr[ETH_ALEN];
2805+ char *val, *tmp = cur;
2806+ int i = 0;
2807+
2808+ while ((val = strsep(&tmp, ":")) != NULL) {
2809+ if (i >= ETH_ALEN)
2810+ break;
2811+
2812+ addr[i++] = strtoul(val, NULL, 16);
2813+ }
2814+
2815+ nla_put(msg, idx, ETH_ALEN, addr);
2816+
2817+ idx++;
2818+ }
2819+
2820+ nla_nest_end(msg, a);
2821+
2822+ free(str);
2823+
2824+ return ret;
2825+}
2826+
2827+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2828+{
2829+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2830+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2831+ unsigned char addr[3][6];
2832+ struct nlattr *cur;
2833+ int idx = 0;
2834+ int rem;
2835+
2836+ nla_for_each_nested(cur, attr, rem) {
2837+ if (nla_len(cur) != 6)
2838+ continue;
2839+ memcpy(addr[idx++], nla_data(cur), 6);
2840+ }
2841+
2842+ printf("" MACSTR "," MACSTR "," MACSTR "",
2843+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2844+
2845+ return;
2846+}
2847
2848 #define FIELD_GENERIC(_field, _name, ...) \
2849 [FIELD_NAME(_field)] = { \
2850@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2851 ##__VA_ARGS__ \
2852 )
2853
2854+#define FIELD_MAC(_field, _name) \
2855+ [FIELD_NAME(_field)] = { \
2856+ .name = _name, \
2857+ .parse = parse_mac, \
2858+ .print = print_mac \
2859+ }
2860+
2861 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2862 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2863 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2864@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2865 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2866 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2867 FIELD(u8, TX_LTF, "tx_ltf"),
2868+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2869+ FIELD(u32, TX_IPG, "tx_ipg"),
2870+ FIELD(u32, TX_TIME, "tx_time"),
2871 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2872 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2873 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2874+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2875 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2876+ FIELD(u8, AID, "aid"),
2877+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2878+ FIELD(u8, RU_IDX, "ru_idx"),
2879+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2880 FIELD_NESTED_RO(STATS, stats, "",
2881 .print_extra = print_extra_stats),
2882 };
2883@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2884 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2885 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2886 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2887+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2888+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2889+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2890 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2891 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2892+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2893 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2894+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2895+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2896+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2897 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2898 };
2899
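A standalone illustration (not part of the tool) of the mac_addrs syntax parse_mac() accepts: up to three comma-separated, colon-delimited addresses, which end up in addr1/addr2/addr3 of the generated test frame header:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char input[] = "00:11:22:33:44:55,00:11:22:33:44:66,00:11:22:33:44:77";
	char *ap = input, *cur;
	int n = 0;

	while (n < 3 && (cur = strsep(&ap, ",")) != NULL) {
		unsigned char addr[6] = { 0 };
		char *val, *tmp = cur;
		int i = 0;

		while ((val = strsep(&tmp, ":")) != NULL && i < 6)
			addr[i++] = strtoul(val, NULL, 16);

		printf("addr%d = %02x:%02x:%02x:%02x:%02x:%02x\n", n + 1,
		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
		n++;
	}
	return 0;
}
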
2900diff --git a/tx.c b/tx.c
developerf1b69ea2022-07-04 10:54:39 +08002901index 6c8d50d3..ae44afe0 100644
developer4c6b6002022-05-30 16:36:44 +08002902--- a/tx.c
2903+++ b/tx.c
2904@@ -245,8 +245,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
2905 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2906 struct mt76_phy *phy = hw->priv;
2907
2908- if (skb == phy->test.tx_skb)
2909- phy->test.tx_done++;
2910+ phy->test.tx_done++;
2911 if (phy->test.tx_queued == phy->test.tx_done)
2912 wake_up(&dev->tx_wait);
2913
2914--
developerd59e4772022-07-14 13:48:49 +080029152.25.1
developer4c6b6002022-05-30 16:36:44 +08002916