From 947b0a84d32ca9fbdfc5befc133da50c1349694e Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1112/1133] mt76: testmode: additional supports

Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 ++++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   39 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   10 +-
 mt7915/mcu.h      |   28 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   14 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1172 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++++++++
 testmode.c        |  275 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   80 ++++
 tx.c              |    3 +-
 19 files changed, 1964 insertions(+), 148 deletions(-)

29diff --git a/dma.c b/dma.c
developer60a3d662023-02-07 15:24:34 +080030index e3fa4f39..a6bb3730 100644
developer6caa5e22022-06-16 13:33:13 +080031--- a/dma.c
32+++ b/dma.c
developer60a3d662023-02-07 15:24:34 +080033@@ -566,8 +566,7 @@ free:
developer6caa5e22022-06-16 13:33:13 +080034 if (mt76_is_testmode_skb(dev, skb, &hw)) {
35 struct mt76_phy *phy = hw->priv;
36
37- if (tx_info.skb == phy->test.tx_skb)
38- phy->test.tx_done--;
39+ phy->test.tx_done--;
40 }
41 #endif
42
43diff --git a/mac80211.c b/mac80211.c
developer60a3d662023-02-07 15:24:34 +080044index 00e7b4f3..2a66b1dd 100644
developer6caa5e22022-06-16 13:33:13 +080045--- a/mac80211.c
46+++ b/mac80211.c
developer60a3d662023-02-07 15:24:34 +080047@@ -56,6 +56,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer6caa5e22022-06-16 13:33:13 +080048 CHAN5G(60, 5300),
49 CHAN5G(64, 5320),
50
51+ CHAN5G(68, 5340),
52+ CHAN5G(80, 5400),
53+ CHAN5G(84, 5420),
54+ CHAN5G(88, 5440),
55+ CHAN5G(92, 5460),
56+ CHAN5G(96, 5480),
57+
58 CHAN5G(100, 5500),
59 CHAN5G(104, 5520),
60 CHAN5G(108, 5540),
developer60a3d662023-02-07 15:24:34 +080061@@ -76,6 +83,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer6caa5e22022-06-16 13:33:13 +080062 CHAN5G(165, 5825),
63 CHAN5G(169, 5845),
64 CHAN5G(173, 5865),
65+
66+ CHAN5G(184, 4920),
67+ CHAN5G(188, 4940),
68+ CHAN5G(192, 4960),
69+ CHAN5G(196, 4980),
70 };
71
72 static const struct ieee80211_channel mt76_channels_6ghz[] = {
73diff --git a/mt76.h b/mt76.h
developer60a3d662023-02-07 15:24:34 +080074index 25ad0f6b..cde52268 100644
developer6caa5e22022-06-16 13:33:13 +080075--- a/mt76.h
76+++ b/mt76.h
developer60a3d662023-02-07 15:24:34 +080077@@ -641,6 +641,21 @@ struct mt76_testmode_ops {
developer6caa5e22022-06-16 13:33:13 +080078 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
79 enum mt76_testmode_state new_state);
80 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
81+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
82+};
83+
84+struct mt76_testmode_entry_data {
85+ struct sk_buff *tx_skb;
86+
87+ u16 tx_mpdu_len;
88+ u8 tx_rate_idx;
89+ u8 tx_rate_nss;
90+ u8 tx_rate_ldpc;
91+
92+ u8 addr[3][ETH_ALEN];
93+ u8 aid;
94+ u8 ru_alloc;
95+ u8 ru_idx;
96 };
97
98 #define MT_TM_FW_RX_COUNT BIT(0)
developer60a3d662023-02-07 15:24:34 +080099@@ -649,16 +664,11 @@ struct mt76_testmode_data {
developer6caa5e22022-06-16 13:33:13 +0800100 enum mt76_testmode_state state;
101
102 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
103- struct sk_buff *tx_skb;
104
105 u32 tx_count;
106- u16 tx_mpdu_len;
107
108 u8 tx_rate_mode;
109- u8 tx_rate_idx;
110- u8 tx_rate_nss;
111 u8 tx_rate_sgi;
112- u8 tx_rate_ldpc;
113 u8 tx_rate_stbc;
114 u8 tx_ltf;
115
developer60a3d662023-02-07 15:24:34 +0800116@@ -674,10 +684,37 @@ struct mt76_testmode_data {
developer6caa5e22022-06-16 13:33:13 +0800117 u8 tx_power[4];
118 u8 tx_power_control;
119
120- u8 addr[3][ETH_ALEN];
121+ struct list_head tm_entry_list;
122+ struct mt76_wcid *cur_entry;
123+ u8 entry_num;
124+ union {
125+ struct mt76_testmode_entry_data ed;
126+ struct {
127+ /* must be the same as mt76_testmode_entry_data */
128+ struct sk_buff *tx_skb;
129+
130+ u16 tx_mpdu_len;
131+ u8 tx_rate_idx;
132+ u8 tx_rate_nss;
133+ u8 tx_rate_ldpc;
134+
135+ u8 addr[3][ETH_ALEN];
136+ u8 aid;
137+ u8 ru_alloc;
138+ u8 ru_idx;
139+ };
140+ };
141
142 u8 flag;
143
144+ struct {
145+ u8 type;
146+ u8 enable;
147+ } cfg;
148+
149+ u8 txbf_act;
150+ u16 txbf_param[8];
151+
152 u32 tx_pending;
153 u32 tx_queued;
154 u16 tx_queued_limit;
developer60a3d662023-02-07 15:24:34 +0800155@@ -1141,6 +1178,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +0800156 #endif
157 }
158
159+#ifdef CONFIG_NL80211_TESTMODE
160+static inline struct mt76_wcid *
161+mt76_testmode_first_entry(struct mt76_phy *phy)
162+{
163+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
164+ return &phy->dev->global_wcid;
165+
166+ return list_first_entry(&phy->test.tm_entry_list,
167+ typeof(struct mt76_wcid),
168+ list);
169+}
170+
171+static inline struct mt76_testmode_entry_data *
172+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
173+{
174+ if (!wcid)
175+ return NULL;
176+ if (wcid == &phy->dev->global_wcid)
177+ return &phy->test.ed;
178+
179+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
180+ phy->hw->sta_data_size);
181+}
182+
183+#define mt76_tm_for_each_entry(phy, wcid, ed) \
184+ for (wcid = mt76_testmode_first_entry(phy), \
185+ ed = mt76_testmode_entry_data(phy, wcid); \
186+ ((phy->test.aid && \
187+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
188+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
189+ wcid = list_next_entry(wcid, list), \
190+ ed = mt76_testmode_entry_data(phy, wcid))
191+#endif
192+
193+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
194+ struct sk_buff *skb)
195+{
196+#ifdef CONFIG_NL80211_TESTMODE
197+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
198+ struct mt76_wcid *wcid;
199+
200+ if (skb == ed->tx_skb)
201+ return true;
202+
203+ mt76_tm_for_each_entry(phy, wcid, ed)
204+ if (skb == ed->tx_skb)
205+ return true;
206+ return false;
207+#else
208+ return false;
209+#endif
210+}
211+
212 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
213 struct sk_buff *skb,
214 struct ieee80211_hw **hw)
developer60a3d662023-02-07 15:24:34 +0800215@@ -1151,7 +1241,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
developerf7a3ca32022-09-01 14:44:55 +0800216 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
217 struct mt76_phy *phy = dev->phys[i];
218
219- if (phy && skb == phy->test.tx_skb) {
220+ if (phy && mt76_testmode_enabled(phy) &&
221+ __mt76_is_testmode_skb(phy, skb)) {
222 *hw = dev->phys[i]->hw;
223 return true;
224 }
developer60a3d662023-02-07 15:24:34 +0800225@@ -1253,7 +1344,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +0800226 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
227 struct netlink_callback *cb, void *data, int len);
228 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
229-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
230+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
231+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
232
233 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
234 {
235diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
developer60a3d662023-02-07 15:24:34 +0800236index 2fefac68..b6c2ccf0 100644
developer6caa5e22022-06-16 13:33:13 +0800237--- a/mt76_connac_mcu.c
238+++ b/mt76_connac_mcu.c
developer3609d782022-11-29 18:07:22 +0800239@@ -394,6 +394,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
developer6caa5e22022-06-16 13:33:13 +0800240 switch (vif->type) {
241 case NL80211_IFTYPE_MESH_POINT:
242 case NL80211_IFTYPE_AP:
243+ case NL80211_IFTYPE_MONITOR:
244 if (vif->p2p)
245 conn_type = CONNECTION_P2P_GC;
246 else
developer3609d782022-11-29 18:07:22 +0800247@@ -575,6 +576,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
developer6caa5e22022-06-16 13:33:13 +0800248 rx->rca2 = 1;
249 rx->rv = 1;
250
251+ if (vif->type == NL80211_IFTYPE_MONITOR)
252+ rx->rca1 = 0;
253+
254 if (!is_connac_v1(dev))
255 return;
256
257diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
developer60a3d662023-02-07 15:24:34 +0800258index 1a146563..f616bcea 100644
developer6caa5e22022-06-16 13:33:13 +0800259--- a/mt76_connac_mcu.h
260+++ b/mt76_connac_mcu.h
developer60a3d662023-02-07 15:24:34 +0800261@@ -999,6 +999,7 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800262 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
263 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
264 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
265+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
266 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
267 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
268 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
developer60a3d662023-02-07 15:24:34 +0800269@@ -1200,6 +1201,7 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800270 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
271 /* for vendor csi and air monitor */
272 MCU_EXT_CMD_SMESH_CTRL = 0xae,
273+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
274 MCU_EXT_CMD_CERT_CFG = 0xb7,
275 MCU_EXT_CMD_CSI_CTRL = 0xc2,
276 };
277diff --git a/mt7915/init.c b/mt7915/init.c
developer60a3d662023-02-07 15:24:34 +0800278index f73d34a9..19447ad1 100644
developer6caa5e22022-06-16 13:33:13 +0800279--- a/mt7915/init.c
280+++ b/mt7915/init.c
developer60a3d662023-02-07 15:24:34 +0800281@@ -681,7 +681,7 @@ static void mt7915_init_work(struct work_struct *work)
developer6caa5e22022-06-16 13:33:13 +0800282 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
283 init_work);
284
285- mt7915_mcu_set_eeprom(dev);
286+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
287 mt7915_mac_init(dev);
288 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
289 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
290diff --git a/mt7915/mac.c b/mt7915/mac.c
developer60a3d662023-02-07 15:24:34 +0800291index de2bdba5..1460a32b 100644
developer6caa5e22022-06-16 13:33:13 +0800292--- a/mt7915/mac.c
293+++ b/mt7915/mac.c
developer60a3d662023-02-07 15:24:34 +0800294@@ -627,16 +627,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer6caa5e22022-06-16 13:33:13 +0800295 {
296 #ifdef CONFIG_NL80211_TESTMODE
297 struct mt76_testmode_data *td = &phy->mt76->test;
298+ struct mt76_testmode_entry_data *ed;
299+ struct mt76_wcid *wcid;
300 const struct ieee80211_rate *r;
301- u8 bw, mode, nss = td->tx_rate_nss;
302- u8 rate_idx = td->tx_rate_idx;
303+ u8 bw, mode, nss, rate_idx, ldpc;
304 u16 rateval = 0;
305 u32 val;
306 bool cck = false;
307 int band;
308
309- if (skb != phy->mt76->test.tx_skb)
310+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
311+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
312+ phy->test.spe_idx));
313+
314+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
315+ txwi[1] |= cpu_to_le32(BIT(18));
316+ txwi[2] = 0;
317+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
318+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
319+
developerf7a3ca32022-09-01 14:44:55 +0800320 return;
developer6caa5e22022-06-16 13:33:13 +0800321+ }
322+
323+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
324+ if (ed->tx_skb == skb)
325+ break;
326+
327+ if (!ed)
developerf7a3ca32022-09-01 14:44:55 +0800328+ return;
329+
developer6caa5e22022-06-16 13:33:13 +0800330+ nss = ed->tx_rate_nss;
331+ rate_idx = ed->tx_rate_idx;
332+ ldpc = ed->tx_rate_ldpc;
developerf7a3ca32022-09-01 14:44:55 +0800333
developer6caa5e22022-06-16 13:33:13 +0800334 switch (td->tx_rate_mode) {
335 case MT76_TM_TX_MODE_HT:
developer60a3d662023-02-07 15:24:34 +0800336@@ -667,7 +689,7 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer3f784572023-01-31 15:21:28 +0800337 rate_idx += 4;
338
339 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
340- val = cck ? r->hw_value_short : r->hw_value;
341+ val = r->hw_value;
342
343 mode = val >> 8;
344 rate_idx = val & 0xff;
developer60a3d662023-02-07 15:24:34 +0800345@@ -726,13 +748,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer6caa5e22022-06-16 13:33:13 +0800346 if (mode >= MT_PHY_TYPE_HE_SU)
347 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
348
349- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
350+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
351 val |= MT_TXD6_LDPC;
352
developerf7a3ca32022-09-01 14:44:55 +0800353 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
developer6caa5e22022-06-16 13:33:13 +0800354+ if (phy->test.bf_en)
355+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
356+
357 txwi[6] |= cpu_to_le32(val);
358- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
359- phy->test.spe_idx));
360 #endif
361 }
362
developer60a3d662023-02-07 15:24:34 +0800363@@ -1479,7 +1502,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
developer047bc182022-11-16 12:20:48 +0800364 goto out;
365
366 /* set the necessary init items */
367- ret = mt7915_mcu_set_eeprom(dev);
368+ ret = mt7915_mcu_set_eeprom(dev, dev->flash_mode);
369 if (ret)
370 goto out;
371
developer6caa5e22022-06-16 13:33:13 +0800372diff --git a/mt7915/main.c b/mt7915/main.c
developer60a3d662023-02-07 15:24:34 +0800373index e4d1c27b..ea0d22fe 100644
developer6caa5e22022-06-16 13:33:13 +0800374--- a/mt7915/main.c
375+++ b/mt7915/main.c
developerc5ce7502022-12-19 11:33:22 +0800376@@ -238,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
developer6caa5e22022-06-16 13:33:13 +0800377 mvif->phy = phy;
developer17bb0a82022-12-13 15:52:04 +0800378 mvif->mt76.band_idx = phy->mt76->band_idx;
developer6caa5e22022-06-16 13:33:13 +0800379
380- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
381+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
382 if (ext_phy)
383 mvif->mt76.wmm_idx += 2;
384
385diff --git a/mt7915/mcu.c b/mt7915/mcu.c
developer60a3d662023-02-07 15:24:34 +0800386index 6ec12fd2..4d878665 100644
developer6caa5e22022-06-16 13:33:13 +0800387--- a/mt7915/mcu.c
388+++ b/mt7915/mcu.c
developer17bb0a82022-12-13 15:52:04 +0800389@@ -383,6 +383,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer6caa5e22022-06-16 13:33:13 +0800390 case MCU_EXT_EVENT_BCC_NOTIFY:
391 mt7915_mcu_rx_bcc_notify(dev, skb);
392 break;
393+#ifdef CONFIG_NL80211_TESTMODE
394+ case MCU_EXT_EVENT_BF_STATUS_READ:
395+ mt7915_tm_txbf_status_read(dev, skb);
396+ break;
397+#endif
398 default:
399 break;
400 }
developer17bb0a82022-12-13 15:52:04 +0800401@@ -414,6 +419,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer6caa5e22022-06-16 13:33:13 +0800402 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
403 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
404 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
405+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
406 !rxd->seq)
407 mt7915_mcu_rx_unsolicited_event(dev, skb);
408 else
developer60a3d662023-02-07 15:24:34 +0800409@@ -2847,14 +2853,14 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
developer6caa5e22022-06-16 13:33:13 +0800410 return 0;
411 }
412
413-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
414+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
415 {
416 struct mt7915_mcu_eeprom req = {
417 .buffer_mode = EE_MODE_EFUSE,
418 .format = EE_FORMAT_WHOLE,
419 };
420
421- if (dev->flash_mode)
422+ if (flash_mode)
423 return mt7915_mcu_set_eeprom_flash(dev);
424
425 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
426diff --git a/mt7915/mcu.h b/mt7915/mcu.h
developer60a3d662023-02-07 15:24:34 +0800427index a4b7ef82..1671d563 100644
developer6caa5e22022-06-16 13:33:13 +0800428--- a/mt7915/mcu.h
429+++ b/mt7915/mcu.h
developer7c3a5082022-06-24 13:40:42 +0800430@@ -8,10 +8,15 @@
developer6caa5e22022-06-16 13:33:13 +0800431
432 enum {
433 MCU_ATE_SET_TRX = 0x1,
434+ MCU_ATE_SET_TSSI = 0x5,
435+ MCU_ATE_SET_DPD = 0x6,
436+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
437+ MCU_ATE_SET_THERMAL_COMP = 0x8,
438 MCU_ATE_SET_FREQ_OFFSET = 0xa,
439 MCU_ATE_SET_PHY_COUNT = 0x11,
440 MCU_ATE_SET_SLOT_TIME = 0x13,
441 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
442+ MCU_ATE_SET_MU_RX_AID = 0x1e,
443 };
444
developer7c3a5082022-06-24 13:40:42 +0800445 struct mt7915_mcu_thermal_ctrl {
developer60a3d662023-02-07 15:24:34 +0800446@@ -472,6 +477,12 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800447
448 enum {
449 MT_BF_SOUNDING_ON = 1,
450+ MT_BF_DATA_PACKET_APPLY = 2,
451+ MT_BF_PFMU_TAG_READ = 5,
452+ MT_BF_PFMU_TAG_WRITE = 6,
453+ MT_BF_PHASE_CAL = 14,
454+ MT_BF_IBF_PHASE_COMP = 15,
455+ MT_BF_PROFILE_WRITE_ALL = 17,
456 MT_BF_TYPE_UPDATE = 20,
457 MT_BF_MODULE_UPDATE = 25
458 };
developer60a3d662023-02-07 15:24:34 +0800459@@ -718,10 +729,19 @@ struct mt7915_muru {
developer6caa5e22022-06-16 13:33:13 +0800460 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
461
developer7c3a5082022-06-24 13:40:42 +0800462 /* Common Config */
developer6caa5e22022-06-16 13:33:13 +0800463-#define MURU_COMM_PPDU_FMT BIT(0)
464-#define MURU_COMM_SCH_TYPE BIT(1)
465-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
developer6caa5e22022-06-16 13:33:13 +0800466-/* DL&UL User config*/
developer6caa5e22022-06-16 13:33:13 +0800467+/* #define MURU_COMM_PPDU_FMT BIT(0) */
468+/* #define MURU_COMM_SCH_TYPE BIT(1) */
469+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
developer7c3a5082022-06-24 13:40:42 +0800470+#define MURU_COMM_PPDU_FMT BIT(0)
471+#define MURU_COMM_SCH_TYPE BIT(1)
472+#define MURU_COMM_BAND BIT(2)
473+#define MURU_COMM_WMM BIT(3)
474+#define MURU_COMM_SPE_IDX BIT(4)
475+#define MURU_COMM_PROC_TYPE BIT(5)
476+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
477+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
developer6caa5e22022-06-16 13:33:13 +0800478+
479+/* DL&UL User config */
480 #define MURU_USER_CNT BIT(4)
481
482 enum {
483diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer60a3d662023-02-07 15:24:34 +0800484index 6d8455d5..9a666d0f 100644
developer6caa5e22022-06-16 13:33:13 +0800485--- a/mt7915/mmio.c
486+++ b/mt7915/mmio.c
developer60a3d662023-02-07 15:24:34 +0800487@@ -134,6 +134,7 @@ static const u32 mt7915_offs[] = {
developer6caa5e22022-06-16 13:33:13 +0800488 [ARB_DRNGR0] = 0x194,
489 [ARB_SCR] = 0x080,
490 [RMAC_MIB_AIRTIME14] = 0x3b8,
491+ [AGG_AALCR0] = 0x048,
492 [AGG_AWSCR0] = 0x05c,
493 [AGG_PCR0] = 0x06c,
494 [AGG_ACR0] = 0x084,
developer60a3d662023-02-07 15:24:34 +0800495@@ -209,6 +210,7 @@ static const u32 mt7916_offs[] = {
developer6caa5e22022-06-16 13:33:13 +0800496 [ARB_DRNGR0] = 0x1e0,
497 [ARB_SCR] = 0x000,
498 [RMAC_MIB_AIRTIME14] = 0x0398,
499+ [AGG_AALCR0] = 0x028,
500 [AGG_AWSCR0] = 0x030,
501 [AGG_PCR0] = 0x040,
502 [AGG_ACR0] = 0x054,
503diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developer60a3d662023-02-07 15:24:34 +0800504index 018fd23e..c45e42c5 100644
developer6caa5e22022-06-16 13:33:13 +0800505--- a/mt7915/mt7915.h
506+++ b/mt7915/mt7915.h
developer60a3d662023-02-07 15:24:34 +0800507@@ -321,6 +321,9 @@ struct mt7915_phy {
developer6caa5e22022-06-16 13:33:13 +0800508 u8 last_snr;
509
510 u8 spe_idx;
511+
512+ bool bf_en;
513+ bool bf_ever_en;
514 } test;
515 #endif
516
developer60a3d662023-02-07 15:24:34 +0800517@@ -420,6 +423,14 @@ struct mt7915_dev {
developer6caa5e22022-06-16 13:33:13 +0800518 void __iomem *dcm;
519 void __iomem *sku;
520
521+#ifdef CONFIG_NL80211_TESTMODE
522+ struct {
523+ void *txbf_phase_cal;
524+ void *txbf_pfmu_data;
525+ void *txbf_pfmu_tag;
526+ } test;
527+#endif
528+
529 #ifdef MTK_DEBUG
530 u16 wlan_idx;
531 struct {
developer60a3d662023-02-07 15:24:34 +0800532@@ -591,7 +602,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
developer6caa5e22022-06-16 13:33:13 +0800533 struct ieee80211_vif *vif,
534 struct ieee80211_sta *sta,
535 void *data, u32 field);
536-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
537+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
538 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
539 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
540 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
developer60a3d662023-02-07 15:24:34 +0800541@@ -629,6 +640,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
developer6caa5e22022-06-16 13:33:13 +0800542 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
543 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
544 void mt7915_mcu_exit(struct mt7915_dev *dev);
545+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
546
547 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
548 {
549diff --git a/mt7915/regs.h b/mt7915/regs.h
developer60a3d662023-02-07 15:24:34 +0800550index d6a05f13..e8768488 100644
developer6caa5e22022-06-16 13:33:13 +0800551--- a/mt7915/regs.h
552+++ b/mt7915/regs.h
developer3609d782022-11-29 18:07:22 +0800553@@ -62,6 +62,7 @@ enum offs_rev {
developer6caa5e22022-06-16 13:33:13 +0800554 ARB_DRNGR0,
555 ARB_SCR,
556 RMAC_MIB_AIRTIME14,
557+ AGG_AALCR0,
558 AGG_AWSCR0,
559 AGG_PCR0,
560 AGG_ACR0,
developer3609d782022-11-29 18:07:22 +0800561@@ -482,6 +483,8 @@ enum offs_rev {
developer6caa5e22022-06-16 13:33:13 +0800562 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
563 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
564
565+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
566+ (_n) * 4))
567 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
568 (_n) * 4))
569 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
570diff --git a/mt7915/testmode.c b/mt7915/testmode.c
developer60a3d662023-02-07 15:24:34 +0800571index 46939191..e0ba088f 100644
developer6caa5e22022-06-16 13:33:13 +0800572--- a/mt7915/testmode.c
573+++ b/mt7915/testmode.c
574@@ -9,6 +9,9 @@
575 enum {
576 TM_CHANGED_TXPOWER,
577 TM_CHANGED_FREQ_OFFSET,
578+ TM_CHANGED_AID,
579+ TM_CHANGED_CFG,
580+ TM_CHANGED_TXBF_ACT,
581
582 /* must be last */
583 NUM_TM_CHANGED
584@@ -17,6 +20,9 @@ enum {
585 static const u8 tm_change_map[] = {
586 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
587 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
588+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
589+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
590+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
591 };
592
593 struct reg_band {
594@@ -33,6 +39,38 @@ struct reg_band {
595 #define TM_REG_MAX_ID 20
596 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
597
598+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
599+
600+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
601+{
602+ static const u8 width_to_bw[] = {
603+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
604+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
605+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
606+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
607+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
608+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
609+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
610+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
611+ };
612+
613+ if (width >= ARRAY_SIZE(width_to_bw))
614+ return 0;
615+
616+ return width_to_bw[width];
617+}
618+
619+static void
620+mt7915_tm_update_channel(struct mt7915_phy *phy)
621+{
622+ mutex_unlock(&phy->dev->mt76.mutex);
623+ mt7915_set_channel(phy);
624+ mutex_lock(&phy->dev->mt76.mutex);
625+
626+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
627+
628+ mt7915_tm_update_entry(phy);
629+}
630
631 static int
632 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
633@@ -119,18 +157,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
634 }
635
636 static int
637-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
638+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
639 {
640+ struct mt76_testmode_entry_data *ed;
641+ struct mt76_wcid *wcid;
642 struct mt7915_dev *dev = phy->dev;
643 struct mt7915_tm_cmd req = {
644 .testmode_en = 1,
645 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
646- .param.clean.wcid = wcid,
developer17bb0a82022-12-13 15:52:04 +0800647 .param.clean.band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +0800648 };
649
650- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
651- sizeof(req), false);
652+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
653+ int ret;
654+
655+ req.param.clean.wcid = wcid->idx;
656+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
657+ &req, sizeof(req), false);
658+ if (ret)
659+ return ret;
660+ }
661+
662+ return 0;
663 }
664
665 static int
developer17bb0a82022-12-13 15:52:04 +0800666@@ -141,7 +189,7 @@ mt7915_tm_set_phy_count(struct mt7915_phy *phy, u8 control)
667 .testmode_en = 1,
668 .param_idx = MCU_ATE_SET_PHY_COUNT,
669 .param.cfg.enable = control,
670- .param.cfg.band = phy != &dev->phy,
671+ .param.cfg.band = phy->mt76->band_idx,
672 };
673
674 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
developer7c3a5082022-06-24 13:40:42 +0800675@@ -182,12 +230,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
developer6caa5e22022-06-16 13:33:13 +0800676 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
677 }
678
679+static int
680+mt7915_tm_set_cfg(struct mt7915_phy *phy)
681+{
682+ static const u8 cfg_cmd[] = {
683+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
684+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
685+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
686+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
687+ };
688+ struct mt76_testmode_data *td = &phy->mt76->test;
689+ struct mt7915_dev *dev = phy->dev;
690+ struct mt7915_tm_cmd req = {
691+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
692+ .param_idx = cfg_cmd[td->cfg.type],
693+ .param.cfg.enable = td->cfg.enable,
developer17bb0a82022-12-13 15:52:04 +0800694+ .param.cfg.band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +0800695+ };
696+
697+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
698+ sizeof(req), false);
699+}
700+
701+static int
702+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
703+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
704+ u8 nc, bool ebf)
705+{
706+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
707+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
708+ struct mt7915_dev *dev = phy->dev;
709+ struct sk_buff *skb;
710+ struct sta_rec_bf *bf;
711+ struct tlv *tlv;
712+ u8 ndp_rate;
713+
714+ if (nr == 1)
715+ ndp_rate = 8;
716+ else if (nr == 2)
717+ ndp_rate = 16;
718+ else
719+ ndp_rate = 24;
720+
721+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
722+ &msta->wcid);
723+ if (IS_ERR(skb))
724+ return PTR_ERR(skb);
725+
726+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
727+ bf = (struct sta_rec_bf *)tlv;
728+
729+ bf->pfmu = cpu_to_le16(pfmu_idx);
730+ bf->sounding_phy = 1;
731+ bf->bf_cap = ebf;
732+ bf->ncol = nc;
733+ bf->nrow = nr;
734+ bf->ndp_rate = ndp_rate;
735+ bf->ibf_timeout = 0xff;
736+ bf->tx_mode = MT_PHY_TYPE_HT;
737+
738+ if (ebf) {
739+ bf->mem[0].row = 0;
740+ bf->mem[1].row = 1;
741+ bf->mem[2].row = 2;
742+ bf->mem[3].row = 3;
743+ } else {
744+ bf->mem[0].row = 4;
745+ bf->mem[1].row = 5;
746+ bf->mem[2].row = 6;
747+ bf->mem[3].row = 7;
748+ }
749+
750+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
751+ MCU_EXT_CMD(STA_REC_UPDATE), true);
752+}
753+
754+static int
755+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
756+{
757+ struct mt76_testmode_data *td = &phy->mt76->test;
758+ struct mt76_testmode_entry_data *ed;
759+ struct ieee80211_sband_iftype_data *sdata;
760+ struct ieee80211_supported_band *sband;
761+ struct ieee80211_sta *sta;
762+ struct mt7915_sta *msta;
763+ int tid, ret;
764+
765+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
766+ return -EINVAL;
767+
768+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
769+ sizeof(*ed), GFP_KERNEL);
770+ if (!sta)
771+ return -ENOMEM;
772+
773+ msta = (struct mt7915_sta *)sta->drv_priv;
774+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
775+ memcpy(ed, &td->ed, sizeof(*ed));
776+
777+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
778+ sband = &phy->mt76->sband_5g.sband;
779+ sdata = phy->iftype[NL80211_BAND_5GHZ];
780+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
781+ sband = &phy->mt76->sband_6g.sband;
782+ sdata = phy->iftype[NL80211_BAND_6GHZ];
783+ } else {
784+ sband = &phy->mt76->sband_2g.sband;
785+ sdata = phy->iftype[NL80211_BAND_2GHZ];
786+ }
787+
788+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
789+ if (phy->test.bf_en) {
790+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
791+
792+ memcpy(sta->addr, addr, ETH_ALEN);
793+ }
794+
795+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
developer17bb0a82022-12-13 15:52:04 +0800796+ memcpy(&sta->deflink.ht_cap, &sband->ht_cap, sizeof(sta->deflink.ht_cap));
developer6caa5e22022-06-16 13:33:13 +0800797+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
developer17bb0a82022-12-13 15:52:04 +0800798+ memcpy(&sta->deflink.vht_cap, &sband->vht_cap, sizeof(sta->deflink.vht_cap));
developer6caa5e22022-06-16 13:33:13 +0800799+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
developer17bb0a82022-12-13 15:52:04 +0800800+ memcpy(&sta->deflink.he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
801+ sizeof(sta->deflink.he_cap));
developer6caa5e22022-06-16 13:33:13 +0800802+ sta->aid = aid;
803+ sta->wme = 1;
804+
805+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
806+ if (ret) {
807+ kfree(sta);
808+ return ret;
809+ }
810+
811+ /* prevent starting a tx BA session */
812+ for (tid = 0; tid < 8; tid++)
813+ set_bit(tid, &msta->ampdu_state);
814+
815+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
816+ td->entry_num++;
817+
818+ return 0;
819+}
820+
821+static void
822+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
823+{
824+ struct mt76_testmode_data *td = &phy->mt76->test;
825+ struct mt76_wcid *wcid, *tmp;
826+
827+ if (list_empty(&td->tm_entry_list))
828+ return;
829+
830+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
831+ struct mt76_testmode_entry_data *ed;
832+ struct mt7915_dev *dev = phy->dev;
833+ struct ieee80211_sta *sta;
834+
835+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
836+ if (aid && ed->aid != aid)
837+ continue;
838+
839+ sta = wcid_to_sta(wcid);
840+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
841+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
842+
843+ list_del_init(&wcid->list);
844+ kfree(sta);
845+ phy->mt76->test.entry_num--;
846+ }
847+}
848+
849+static int
850+mt7915_tm_set_entry(struct mt7915_phy *phy)
851+{
852+ struct mt76_testmode_data *td = &phy->mt76->test;
853+ struct mt76_testmode_entry_data *ed;
854+ struct mt76_wcid *wcid;
855+
856+ if (!td->aid) {
857+ if (td->state > MT76_TM_STATE_IDLE)
858+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
859+ mt7915_tm_entry_remove(phy, td->aid);
860+ return 0;
861+ }
862+
863+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
864+ if (ed->aid == td->aid) {
865+ struct sk_buff *skb;
866+
867+ local_bh_disable();
868+ skb = ed->tx_skb;
869+ memcpy(ed, &td->ed, sizeof(*ed));
870+ ed->tx_skb = skb;
871+ local_bh_enable();
872+
873+ return 0;
874+ }
875+ }
876+
877+ return mt7915_tm_entry_add(phy, td->aid);
878+}
879+
880+static void
881+mt7915_tm_update_entry(struct mt7915_phy *phy)
882+{
883+ struct mt76_testmode_data *td = &phy->mt76->test;
884+ struct mt76_testmode_entry_data *ed, tmp;
885+ struct mt76_wcid *wcid, *last;
886+
887+ if (!td->aid || phy->test.bf_en)
888+ return;
889+
890+ memcpy(&tmp, &td->ed, sizeof(tmp));
891+ last = list_last_entry(&td->tm_entry_list,
892+ struct mt76_wcid, list);
893+
894+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
895+ memcpy(&td->ed, ed, sizeof(td->ed));
896+ mt7915_tm_entry_remove(phy, td->aid);
897+ mt7915_tm_entry_add(phy, td->aid);
898+ if (wcid == last)
899+ break;
900+ }
901+
902+ memcpy(&td->ed, &tmp, sizeof(td->ed));
903+}
904+
905+static int
906+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
907+{
908+ struct mt76_testmode_data *td = &phy->mt76->test;
909+ struct mt7915_dev *dev = phy->dev;
910+ bool enable = val[0];
911+ void *phase_cal, *pfmu_data, *pfmu_tag;
912+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
913+
914+ if (!enable) {
915+ phy->test.bf_en = 0;
916+ return 0;
917+ }
918+
919+ if (!dev->test.txbf_phase_cal) {
920+ phase_cal = devm_kzalloc(dev->mt76.dev,
921+ sizeof(struct mt7915_tm_txbf_phase) *
922+ MAX_PHASE_GROUP_NUM,
923+ GFP_KERNEL);
924+ if (!phase_cal)
925+ return -ENOMEM;
926+
927+ dev->test.txbf_phase_cal = phase_cal;
928+ }
929+
930+ if (!dev->test.txbf_pfmu_data) {
931+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
932+ if (!pfmu_data)
933+ return -ENOMEM;
934+
935+ dev->test.txbf_pfmu_data = pfmu_data;
936+ }
937+
938+ if (!dev->test.txbf_pfmu_tag) {
939+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
940+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
941+ if (!pfmu_tag)
942+ return -ENOMEM;
943+
944+ dev->test.txbf_pfmu_tag = pfmu_tag;
945+ }
946+
947+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
948+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
949+
950+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
951+ td->tx_mpdu_len = 1024;
952+ td->tx_rate_sgi = 0;
953+ td->tx_ipg = 100;
954+ phy->test.bf_en = 1;
955+
956+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
957+}
958+
959+static int
960+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
961+{
962+ struct mt7915_dev *dev = phy->dev;
963+ struct {
964+ u8 category;
965+ u8 wlan_idx_lo;
966+ u8 bw;
967+ u8 jp_band;
968+ u8 dbdc_idx;
969+ bool read_from_e2p;
970+ bool disable;
971+ u8 wlan_idx_hi;
972+ u8 buf[40];
973+ } __packed req = {
974+ .category = MT_BF_IBF_PHASE_COMP,
975+ .bw = val[0],
976+ .jp_band = (val[2] == 1) ? 1 : 0,
developer17bb0a82022-12-13 15:52:04 +0800977+ .dbdc_idx = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +0800978+ .read_from_e2p = val[3],
979+ .disable = val[4],
980+ };
981+ struct mt7915_tm_txbf_phase *phase =
982+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
983+
984+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
985+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
986+
987+ pr_info("ibf cal process: phase comp info\n");
988+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
989+ &req, sizeof(req), 0);
990+
991+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
992+ sizeof(req), true);
993+}
994+
995+static int
996+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
997+{
998+ struct mt7915_dev *dev = phy->dev;
999+ struct {
1000+ u8 format_id;
1001+ u8 pfmu_idx;
1002+ bool bfer;
1003+ u8 dbdc_idx;
1004+ } __packed req = {
1005+ .format_id = MT_BF_PFMU_TAG_READ,
1006+ .pfmu_idx = pfmu_idx,
1007+ .bfer = 1,
1008+ .dbdc_idx = phy != &dev->phy,
1009+ };
1010+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1011+
1012+ tag->t1.pfmu_idx = 0;
1013+
1014+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1015+ sizeof(req), true);
1016+}
1017+
1018+static int
1019+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
1020+ struct mt7915_tm_pfmu_tag *tag)
1021+{
1022+ struct mt7915_dev *dev = phy->dev;
1023+ struct {
1024+ u8 format_id;
1025+ u8 pfmu_idx;
1026+ bool bfer;
1027+ u8 dbdc_idx;
1028+ u8 buf[64];
1029+ } __packed req = {
1030+ .format_id = MT_BF_PFMU_TAG_WRITE,
1031+ .pfmu_idx = pfmu_idx,
1032+ .bfer = 1,
1033+ .dbdc_idx = phy != &dev->phy,
1034+ };
1035+
1036+ memcpy(req.buf, tag, sizeof(*tag));
1037+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1038+
1039+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1040+ sizeof(req), false);
1041+}
1042+
1043+static int
1044+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1045+ bool ibf, bool phase_cal)
1046+{
1047+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1048+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1049+ struct mt7915_dev *dev = phy->dev;
1050+ struct {
1051+ u8 category;
1052+ u8 wlan_idx_lo;
1053+ bool ebf;
1054+ bool ibf;
1055+ bool mu_txbf;
1056+ bool phase_cal;
1057+ u8 wlan_idx_hi;
1058+ u8 _rsv;
1059+ } __packed req = {
1060+ .category = MT_BF_DATA_PACKET_APPLY,
1061+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1062+ .ebf = ebf,
1063+ .ibf = ibf,
1064+ .phase_cal = phase_cal,
1065+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1066+ };
1067+
1068+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1069+ sizeof(req), false);
1070+}
1071+
1072+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1073+ struct mt76_wcid *wcid)
1074+{
1075+ struct mt7915_dev *dev = phy->dev;
1076+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1077+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1078+ struct sta_phy rate = {};
1079+
1080+ if (!sta)
1081+ return 0;
1082+
1083+ rate.type = MT_PHY_TYPE_HT;
1084+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1085+ rate.nss = ed->tx_rate_nss;
1086+ rate.mcs = ed->tx_rate_idx;
1087+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1088+
1089+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1090+ &rate, RATE_PARAM_FIXED);
1091+}
1092+
1093+static int
1094+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1095+{
1096+ bool bf_on = val[0], update = val[3];
1097+ /* u16 wlan_idx = val[2]; */
1098+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1099+ struct mt76_testmode_data *td = &phy->mt76->test;
1100+ struct mt76_wcid *wcid;
1101+
1102+ if (bf_on) {
1103+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1104+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1105+ tag->t1.invalid_prof = false;
1106+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1107+
1108+ phy->test.bf_ever_en = true;
1109+
1110+ if (update)
1111+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1112+ } else {
1113+ if (!phy->test.bf_ever_en) {
1114+ if (update)
1115+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1116+ } else {
1117+ phy->test.bf_ever_en = false;
1118+
1119+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1120+ tag->t1.invalid_prof = true;
1121+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1122+ }
1123+ }
1124+
1125+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1126+ mt7915_tm_txbf_set_rate(phy, wcid);
1127+
1128+ return 0;
1129+}
1130+
1131+static int
1132+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1133+{
1134+ static const u8 mode_to_lm[] = {
1135+ [MT76_TM_TX_MODE_CCK] = 0,
1136+ [MT76_TM_TX_MODE_OFDM] = 0,
1137+ [MT76_TM_TX_MODE_HT] = 1,
1138+ [MT76_TM_TX_MODE_VHT] = 2,
1139+ [MT76_TM_TX_MODE_HE_SU] = 3,
1140+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1141+ [MT76_TM_TX_MODE_HE_TB] = 3,
1142+ [MT76_TM_TX_MODE_HE_MU] = 3,
1143+ };
1144+ struct mt76_testmode_data *td = &phy->mt76->test;
1145+ struct mt76_wcid *wcid;
1146+ struct ieee80211_vif *vif = phy->monitor_vif;
1147+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1148+ u8 pfmu_idx = val[0], nc = val[2], nr;
1149+ int ret;
1150+
1151+ if (td->tx_antenna_mask == 3)
1152+ nr = 1;
1153+ else if (td->tx_antenna_mask == 7)
1154+ nr = 2;
1155+ else
1156+ nr = 3;
1157+
1158+ memset(tag, 0, sizeof(*tag));
1159+ tag->t1.pfmu_idx = pfmu_idx;
1160+ tag->t1.ebf = ebf;
1161+ tag->t1.nr = nr;
1162+ tag->t1.nc = nc;
1163+ tag->t1.invalid_prof = true;
1164+
1165+ tag->t1.snr_sts4 = 0xc0;
1166+ tag->t1.snr_sts5 = 0xff;
1167+ tag->t1.snr_sts6 = 0xff;
1168+ tag->t1.snr_sts7 = 0xff;
1169+
1170+ if (ebf) {
1171+ tag->t1.row_id1 = 0;
1172+ tag->t1.row_id2 = 1;
1173+ tag->t1.row_id3 = 2;
1174+ tag->t1.row_id4 = 3;
1175+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1176+ } else {
1177+ tag->t1.row_id1 = 4;
1178+ tag->t1.row_id2 = 5;
1179+ tag->t1.row_id3 = 6;
1180+ tag->t1.row_id4 = 7;
1181+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1182+
1183+ tag->t2.ibf_timeout = 0xff;
1184+ tag->t2.ibf_nr = nr;
1185+ }
1186+
1187+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1188+ if (ret)
1189+ return ret;
1190+
1191+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1192+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1193+ if (ret)
1194+ return ret;
1195+
1196+ if (!ebf)
1197+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1198+
1199+ return 0;
1200+}
1201+
1202+static int
1203+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1204+{
1205+#define GROUP_L 0
1206+#define GROUP_M 1
1207+#define GROUP_H 2
1208+ struct mt7915_dev *dev = phy->dev;
1209+ struct {
1210+ u8 category;
1211+ u8 group_l_m_n;
1212+ u8 group;
1213+ bool sx2;
1214+ u8 cal_type;
1215+ u8 lna_gain_level;
1216+ u8 _rsv[2];
1217+ } __packed req = {
1218+ .category = MT_BF_PHASE_CAL,
1219+ .group = val[0],
1220+ .group_l_m_n = val[1],
1221+ .sx2 = val[2],
1222+ .cal_type = val[3],
1223+ .lna_gain_level = 0, /* for test purposes */
1224+ };
1225+ struct mt7915_tm_txbf_phase *phase =
1226+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1227+
1228+ phase[req.group].status = 0;
1229+
1230+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1231+ sizeof(req), true);
1232+}
1233+
1234+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1235+{
1236+#define BF_PFMU_TAG 16
1237+#define BF_CAL_PHASE 21
1238+ u8 format_id;
1239+
developer7c3a5082022-06-24 13:40:42 +08001240+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
developer6caa5e22022-06-16 13:33:13 +08001241+ format_id = *(u8 *)skb->data;
1242+
1243+ if (format_id == BF_PFMU_TAG) {
1244+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1245+
1246+ skb_pull(skb, 8);
1247+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1248+ } else if (format_id == BF_CAL_PHASE) {
1249+ struct mt7915_tm_ibf_cal_info *cal;
1250+ struct mt7915_tm_txbf_phase *phase =
1251+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1252+
1253+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1254+ switch (cal->cal_type) {
1255+ case IBF_PHASE_CAL_NORMAL:
1256+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1257+ if (cal->group_l_m_n != GROUP_M)
1258+ break;
1259+ phase = &phase[cal->group];
1260+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1261+ phase->status = cal->status;
1262+ break;
1263+ case IBF_PHASE_CAL_VERIFY:
1264+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1265+ break;
1266+ default:
1267+ break;
1268+ }
1269+ }
1270+
1271+ wake_up(&dev->mt76.tx_wait);
1272+
1273+ return 0;
1274+}
1275+
1276+static int
1277+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1278+{
1279+ struct mt76_testmode_data *td = &phy->mt76->test;
1280+ u16 pfmu_idx = val[0];
1281+ u16 subc_id = val[1];
1282+ u16 angle11 = val[2];
1283+ u16 angle21 = val[3];
1284+ u16 angle31 = val[4];
1285+ u16 angle41 = val[5];
1286+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1287+ struct mt7915_tm_pfmu_data *pfmu_data;
1288+
1289+ if (subc_id > 63)
1290+ return -EINVAL;
1291+
1292+ if (td->tx_antenna_mask == 2) {
1293+ phi11 = (s16)(angle21 - angle11);
1294+ } else if (td->tx_antenna_mask == 3) {
1295+ phi11 = (s16)(angle31 - angle11);
1296+ phi21 = (s16)(angle31 - angle21);
1297+ } else {
1298+ phi11 = (s16)(angle41 - angle11);
1299+ phi21 = (s16)(angle41 - angle21);
1300+ phi31 = (s16)(angle41 - angle31);
1301+ }
1302+
1303+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1304+ pfmu_data = &pfmu_data[subc_id];
1305+
1306+ if (subc_id < 32)
1307+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1308+ else
1309+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1310+ pfmu_data->phi11 = cpu_to_le16(phi11);
1311+ pfmu_data->phi21 = cpu_to_le16(phi21);
1312+ pfmu_data->phi31 = cpu_to_le16(phi31);
1313+
1314+ if (subc_id == 63) {
1315+ struct mt7915_dev *dev = phy->dev;
1316+ struct {
1317+ u8 format_id;
1318+ u8 pfmu_idx;
1319+ u8 dbdc_idx;
1320+ u8 _rsv;
1321+ u8 buf[512];
1322+ } __packed req = {
1323+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1324+ .pfmu_idx = pfmu_idx,
1325+ .dbdc_idx = phy != &dev->phy,
1326+ };
1327+
1328+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1329+
1330+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1331+ &req, sizeof(req), true);
1332+ }
1333+
1334+ return 0;
1335+}
1336+
1337+static int
1338+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1339+{
1340+ struct mt7915_tm_txbf_phase *phase, *p;
1341+ struct mt7915_dev *dev = phy->dev;
1342+ u8 *eeprom = dev->mt76.eeprom.data;
1343+ u16 offset;
1344+ bool is_7976;
1345+ int i;
1346+
1347+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1348+ offset = is_7976 ? 0x60a : 0x651;
1349+
1350+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1351+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1352+ p = &phase[i];
1353+
1354+ if (!p->status)
1355+ continue;
1356+
1357+ /* copy phase cal data to eeprom */
1358+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1359+ sizeof(p->phase));
1360+ }
1361+
1362+ return 0;
1363+}
1364+
1365+static int
1366+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1367+{
1368+ struct mt76_testmode_data *td = &phy->mt76->test;
1369+ u16 *val = td->txbf_param;
1370+
1371+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1372+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1373+
1374+ switch (td->txbf_act) {
1375+ case MT76_TM_TXBF_ACT_INIT:
1376+ return mt7915_tm_txbf_init(phy, val);
1377+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1378+ mt7915_tm_update_channel(phy);
1379+ break;
1380+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1381+ return mt7915_tm_txbf_phase_comp(phy, val);
1382+ case MT76_TM_TXBF_ACT_TX_PREP:
1383+ return mt7915_tm_txbf_set_tx(phy, val);
1384+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1385+ return mt7915_tm_txbf_profile_update(phy, val, false);
1386+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1387+ return mt7915_tm_txbf_profile_update(phy, val, true);
1388+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1389+ return mt7915_tm_txbf_phase_cal(phy, val);
1390+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1391+ return mt7915_tm_txbf_profile_update_all(phy, val);
1392+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1393+ return mt7915_tm_txbf_e2p_update(phy);
1394+ default:
1395+ break;
1396+ };
1397+
1398+ return 0;
1399+}
1400+
1401 static int
developer7c3a5082022-06-24 13:40:42 +08001402 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer6caa5e22022-06-16 13:33:13 +08001403- u16 cw_max, u16 txop)
1404+ u16 cw_max, u16 txop, u8 tx_cmd)
1405 {
developer7c3a5082022-06-24 13:40:42 +08001406 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer6caa5e22022-06-16 13:33:13 +08001407- struct mt7915_mcu_tx req = { .total = 1 };
1408+ struct mt7915_mcu_tx req = {
1409+ .valid = true,
1410+ .mode = tx_cmd,
1411+ .total = 1,
1412+ };
1413 struct edca *e = &req.edca[0];
1414
developer7c3a5082022-06-24 13:40:42 +08001415 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
developer17bb0a82022-12-13 15:52:04 +08001416@@ -263,7 +1037,8 @@ done:
developer6caa5e22022-06-16 13:33:13 +08001417
developer7c3a5082022-06-24 13:40:42 +08001418 return mt7915_tm_set_wmm_qid(phy,
developer6caa5e22022-06-16 13:33:13 +08001419 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1420- aifsn, cw, cw, 0);
1421+ aifsn, cw, cw, 0,
1422+ mode == MT76_TM_TX_MODE_HE_MU);
1423 }
1424
1425 static int
developer17bb0a82022-12-13 15:52:04 +08001426@@ -339,7 +1114,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
developer6caa5e22022-06-16 13:33:13 +08001427 bitrate = cfg80211_calculate_bitrate(&rate);
1428 tx_len = bitrate * tx_time / 10 / 8;
1429
1430- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1431+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1432 if (ret)
1433 return ret;
1434
developer17bb0a82022-12-13 15:52:04 +08001435@@ -458,64 +1233,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001436
1437 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1438
1439- if (!en)
1440+ if (!en) {
1441 mt7915_tm_set_tam_arb(phy, en, 0);
1442+
1443+ phy->mt76->test.aid = 0;
1444+ phy->mt76->test.tx_mpdu_len = 0;
1445+ phy->test.bf_en = 0;
1446+ mt7915_tm_set_entry(phy);
1447+ }
1448+}
1449+
1450+static bool
1451+mt7915_tm_check_skb(struct mt7915_phy *phy)
1452+{
1453+ struct mt76_testmode_entry_data *ed;
1454+ struct mt76_wcid *wcid;
1455+
1456+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1457+ struct ieee80211_tx_info *info;
1458+
1459+ if (!ed->tx_skb)
1460+ return false;
1461+
1462+ info = IEEE80211_SKB_CB(ed->tx_skb);
1463+ info->control.vif = phy->monitor_vif;
1464+ }
1465+
1466+ return true;
1467+}
1468+
1469+static int
1470+mt7915_tm_set_ba(struct mt7915_phy *phy)
1471+{
1472+ struct mt7915_dev *dev = phy->dev;
1473+ struct mt76_testmode_data *td = &phy->mt76->test;
1474+ struct mt76_wcid *wcid;
1475+ struct ieee80211_vif *vif = phy->monitor_vif;
1476+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1477+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1478+
1479+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1480+ int tid, ret;
1481+
1482+ params.sta = wcid_to_sta(wcid);
1483+ for (tid = 0; tid < 8; tid++) {
1484+ params.tid = tid;
1485+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1486+ if (ret)
1487+ return ret;
1488+ }
1489+ }
1490+
1491+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1492+ 0x01010101);
1493+
1494+ return 0;
1495+}
1496+
1497+static int
1498+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1499+{
1500+/* #define MURU_SET_MANUAL_CFG 100 */
1501+ struct mt7915_dev *dev = phy->dev;
1502+ struct {
1503+ __le32 cmd;
1504+ struct mt7915_tm_muru muru;
1505+ } __packed req = {
1506+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1507+ };
1508+
1509+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1510+
1511+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1512+ sizeof(req), false);
1513+}
1514+
1515+static int
1516+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1517+{
1518+ struct mt76_testmode_data *td = &phy->mt76->test;
1519+ struct mt76_testmode_entry_data *ed;
1520+ struct mt76_wcid *wcid;
1521+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1522+ struct ieee80211_vif *vif = phy->monitor_vif;
1523+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1524+ struct mt7915_tm_muru muru = {};
1525+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1526+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1527+ int i;
1528+
1529+ comm->ppdu_format = MURU_PPDU_HE_MU;
1530+ comm->band = mvif->mt76.band_idx;
1531+ comm->wmm_idx = mvif->mt76.wmm_idx;
1532+ comm->spe_idx = phy->test.spe_idx;
1533+
1534+ dl->bw = mt7915_tm_chan_bw(chandef->width);
1535+ dl->gi = td->tx_rate_sgi;
1536+ dl->ltf = td->tx_ltf;
1537+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1538+
1539+ for (i = 0; i < sizeof(dl->ru); i++)
1540+ dl->ru[i] = 0x71;
1541+
1542+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1543+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1544+
1545+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1546+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1547+ dl_usr->ru_idx = ed->ru_idx;
1548+ dl_usr->mcs = ed->tx_rate_idx;
1549+ dl_usr->nss = ed->tx_rate_nss - 1;
1550+ dl_usr->ldpc = ed->tx_rate_ldpc;
1551+ dl->ru[dl->user_num] = ed->ru_alloc;
1552+
1553+ dl->user_num++;
1554+ }
1555+
1556+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET);
1557+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1558+
1559+ return mt7915_tm_set_muru_cfg(phy, &muru);
1560+}
1561+
1562+static int
1563+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1564+{
1565+#define MURU_SET_TX_PKT_CNT 105
1566+#define MURU_SET_TX_EN 106
1567+ struct mt7915_dev *dev = phy->dev;
1568+ struct {
1569+ __le32 cmd;
1570+ u8 band;
1571+ u8 enable;
1572+ u8 _rsv[2];
1573+ __le32 tx_count;
1574+ } __packed req = {
developer17bb0a82022-12-13 15:52:04 +08001575+ .band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +08001576+ .enable = enable,
1577+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1578+ };
1579+ int ret;
1580+
1581+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1582+ cpu_to_le32(MURU_SET_TX_EN);
1583+
1584+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1585+ sizeof(req), false);
1586+ if (ret)
1587+ return ret;
1588+
1589+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1590+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1591+
1592+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1593+ sizeof(req), false);
1594 }
1595
1596 static void
1597-mt7915_tm_update_channel(struct mt7915_phy *phy)
1598+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1599 {
1600- mutex_unlock(&phy->dev->mt76.mutex);
1601- mt7915_set_channel(phy);
1602- mutex_lock(&phy->dev->mt76.mutex);
1603+ struct mt76_testmode_data *td = &phy->mt76->test;
1604
1605- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1606+ if (enable) {
1607+ struct mt7915_dev *dev = phy->dev;
1608+
1609+ mt7915_tm_set_ba(phy);
1610+ mt7915_tm_set_muru_dl(phy);
1611+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1612+ } else {
1613+ /* set to zero for counting real tx free num */
1614+ td->tx_done = 0;
1615+ }
1616+
1617+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1618+ usleep_range(100000, 200000);
1619 }
1620
1621 static void
developer072c5612022-07-15 18:30:03 +08001622 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1623 {
developer6caa5e22022-06-16 13:33:13 +08001624 struct mt76_testmode_data *td = &phy->mt76->test;
1625- struct mt7915_dev *dev = phy->dev;
1626- struct ieee80211_tx_info *info;
1627- u8 duty_cycle = td->tx_duty_cycle;
1628- u32 tx_time = td->tx_time;
1629- u32 ipg = td->tx_ipg;
1630
1631 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1632- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1633+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1634
1635 if (en) {
1636- mt7915_tm_update_channel(phy);
1637+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1638+ u8 duty_cycle = td->tx_duty_cycle;
1639+
1640+ if (!phy->test.bf_en)
1641+ mt7915_tm_update_channel(phy);
1642
developer072c5612022-07-15 18:30:03 +08001643 if (td->tx_spe_idx)
developer6caa5e22022-06-16 13:33:13 +08001644 phy->test.spe_idx = td->tx_spe_idx;
developer072c5612022-07-15 18:30:03 +08001645 else
1646 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
developer6caa5e22022-06-16 13:33:13 +08001647- }
1648
1649- mt7915_tm_set_tam_arb(phy, en,
1650- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1651+ /* if all three params are set, duty_cycle will be ignored */
1652+ if (duty_cycle && tx_time && !ipg) {
1653+ ipg = tx_time * 100 / duty_cycle - tx_time;
1654+ } else if (duty_cycle && !tx_time && ipg) {
1655+ if (duty_cycle < 100)
1656+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1657+ }
1658
1659- /* if all three params are set, duty_cycle will be ignored */
1660- if (duty_cycle && tx_time && !ipg) {
1661- ipg = tx_time * 100 / duty_cycle - tx_time;
1662- } else if (duty_cycle && !tx_time && ipg) {
1663- if (duty_cycle < 100)
1664- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1665- }
1666+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1667+ mt7915_tm_set_tx_len(phy, tx_time);
1668
1669- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1670- mt7915_tm_set_tx_len(phy, tx_time);
1671+ if (ipg)
1672+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1673
1674- if (ipg)
1675- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1676+ if (!mt7915_tm_check_skb(phy))
1677+ return;
1678+ } else {
1679+ mt7915_tm_clean_hwq(phy);
1680+ }
1681
1682- if (!en || !td->tx_skb)
1683- return;
1684+ mt7915_tm_set_tam_arb(phy, en,
1685+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1686
1687- info = IEEE80211_SKB_CB(td->tx_skb);
1688- info->control.vif = phy->monitor_vif;
1689+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1690+ mt7915_tm_tx_frames_mu(phy, en);
1691
1692 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1693 }
developer17bb0a82022-12-13 15:52:04 +08001694@@ -544,10 +1482,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer6caa5e22022-06-16 13:33:13 +08001695 return ret;
1696
1697 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1698- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1699- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1700- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1701- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1702
1703 if (!clear) {
developerf7a3ca32022-09-01 14:44:55 +08001704 enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
developer17bb0a82022-12-13 15:52:04 +08001705@@ -562,13 +1496,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer6caa5e22022-06-16 13:33:13 +08001706 return 0;
1707 }
1708
1709+static int
1710+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1711+{
1712+ struct mt7915_dev *dev = phy->dev;
1713+ struct mt76_wcid *wcid = NULL;
1714+ struct mt76_testmode_entry_data *ed;
1715+ struct {
1716+ u8 band;
1717+ u8 _rsv;
1718+ __le16 wlan_idx;
1719+ } __packed req = {
developer17bb0a82022-12-13 15:52:04 +08001720+ .band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +08001721+ };
1722+
1723+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1724+ if (ed->aid == aid)
1725+ break;
1726+
1727+ if (!wcid)
1728+ return -EINVAL;
1729+
1730+ req.wlan_idx = cpu_to_le16(wcid->idx);
1731+
1732+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1733+ &req, sizeof(req), false);
1734+}
1735+
1736+static int
1737+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1738+{
1739+ struct mt7915_dev *dev = phy->dev;
1740+ struct mt7915_tm_cmd req = {
1741+ .testmode_en = 1,
1742+ .param_idx = MCU_ATE_SET_MU_RX_AID,
developer17bb0a82022-12-13 15:52:04 +08001743+ .param.rx_aid.band = cpu_to_le32(phy->mt76->band_idx),
developer6caa5e22022-06-16 13:33:13 +08001744+ .param.rx_aid.aid = cpu_to_le16(aid),
1745+ };
1746+
1747+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1748+ sizeof(req), false);
1749+}
1750+
1751 static void
1752 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1753 {
1754+ struct mt76_testmode_data *td = &phy->mt76->test;
1755+
1756+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1757 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1758
1759 if (en) {
1760- mt7915_tm_update_channel(phy);
1761+ if (!phy->test.bf_en)
1762+ mt7915_tm_update_channel(phy);
1763+ if (td->aid)
1764+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1765
1766 /* read-clear */
1767 mt7915_tm_get_rx_stats(phy, true);
developer17bb0a82022-12-13 15:52:04 +08001768@@ -576,9 +1558,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001769 /* clear fw count */
1770 mt7915_tm_set_phy_count(phy, 0);
1771 mt7915_tm_set_phy_count(phy, 1);
1772-
1773- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1774 }
1775+
1776+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1777+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1778+
1779+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1780 }
1781
1782 static int
developer17bb0a82022-12-13 15:52:04 +08001783@@ -617,34 +1602,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001784 tx_cont->tx_ant = td->tx_antenna_mask;
developer17bb0a82022-12-13 15:52:04 +08001785 tx_cont->band = band;
developer3609d782022-11-29 18:07:22 +08001786
developer6caa5e22022-06-16 13:33:13 +08001787- switch (chandef->width) {
1788- case NL80211_CHAN_WIDTH_40:
1789- tx_cont->bw = CMD_CBW_40MHZ;
1790- break;
1791- case NL80211_CHAN_WIDTH_80:
1792- tx_cont->bw = CMD_CBW_80MHZ;
1793- break;
1794- case NL80211_CHAN_WIDTH_80P80:
1795- tx_cont->bw = CMD_CBW_8080MHZ;
1796- break;
1797- case NL80211_CHAN_WIDTH_160:
1798- tx_cont->bw = CMD_CBW_160MHZ;
1799- break;
1800- case NL80211_CHAN_WIDTH_5:
1801- tx_cont->bw = CMD_CBW_5MHZ;
1802- break;
1803- case NL80211_CHAN_WIDTH_10:
1804- tx_cont->bw = CMD_CBW_10MHZ;
1805- break;
1806- case NL80211_CHAN_WIDTH_20:
1807- tx_cont->bw = CMD_CBW_20MHZ;
1808- break;
1809- case NL80211_CHAN_WIDTH_20_NOHT:
1810- tx_cont->bw = CMD_CBW_20MHZ;
1811- break;
1812- default:
1813- return -EINVAL;
1814- }
1815+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1816
1817 if (!en) {
developer17bb0a82022-12-13 15:52:04 +08001818 req.op.rf.param.func_data = cpu_to_le32(band);
1819@@ -728,6 +1686,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer6caa5e22022-06-16 13:33:13 +08001820 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1821 if (changed & BIT(TM_CHANGED_TXPOWER))
1822 mt7915_tm_set_tx_power(phy);
1823+ if (changed & BIT(TM_CHANGED_AID))
1824+ mt7915_tm_set_entry(phy);
1825+ if (changed & BIT(TM_CHANGED_CFG))
1826+ mt7915_tm_set_cfg(phy);
1827+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1828+ mt7915_tm_set_txbf(phy);
1829 }
1830
1831 static int
developer17bb0a82022-12-13 15:52:04 +08001832@@ -807,6 +1771,7 @@ static int
developer6caa5e22022-06-16 13:33:13 +08001833 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1834 {
1835 struct mt7915_phy *phy = mphy->priv;
1836+ struct mt7915_dev *dev = phy->dev;
1837 void *rx, *rssi;
1838 int i;
1839
developer17bb0a82022-12-13 15:52:04 +08001840@@ -852,11 +1817,68 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer6caa5e22022-06-16 13:33:13 +08001841
1842 nla_nest_end(msg, rx);
1843
1844+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1845+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1846+
1847 return mt7915_tm_get_rx_stats(phy, false);
1848 }
1849
1850+static int
1851+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1852+{
1853+ struct mt7915_mcu_eeprom_info req = {};
1854+ u8 *eeprom = dev->mt76.eeprom.data;
1855+ int i, ret = -EINVAL;
1856+
1857+	/* prevent damaging the chip id in the efuse */
1858+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1859+ goto out;
1860+
1861+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1862+ req.addr = cpu_to_le32(i);
1863+ memcpy(&req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1864+
1865+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
1866+ &req, sizeof(req), true);
1867+ if (ret)
1868+ return ret;
1869+ }
1870+
1871+out:
1872+ return ret;
1873+}
1874+
1875+static int
1876+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
1877+{
1878+ struct mt7915_phy *phy = mphy->priv;
1879+ struct mt7915_dev *dev = phy->dev;
1880+ u8 *eeprom = dev->mt76.eeprom.data;
1881+ int ret = 0;
1882+
1883+ if (offset >= mt7915_eeprom_size(dev))
1884+ return -EINVAL;
1885+
1886+ switch (action) {
1887+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
1888+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
1889+ break;
1890+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
1891+ ret = mt7915_mcu_set_eeprom(dev, true);
1892+ break;
1893+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
1894+ ret = mt7915_tm_write_back_to_efuse(dev);
1895+ break;
1896+ default:
1897+ break;
1898+ }
1899+
1900+ return ret;
1901+}
1902+
1903 const struct mt76_testmode_ops mt7915_testmode_ops = {
1904 .set_state = mt7915_tm_set_state,
1905 .set_params = mt7915_tm_set_params,
1906 .dump_stats = mt7915_tm_dump_stats,
1907+ .set_eeprom = mt7915_tm_set_eeprom,
1908 };
1909diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developer60a3d662023-02-07 15:24:34 +08001910index a1c54c89..01b08e9e 100644
developer6caa5e22022-06-16 13:33:13 +08001911--- a/mt7915/testmode.h
1912+++ b/mt7915/testmode.h
1913@@ -4,6 +4,8 @@
1914 #ifndef __MT7915_TESTMODE_H
1915 #define __MT7915_TESTMODE_H
1916
1917+#include "mcu.h"
1918+
1919 struct mt7915_tm_trx {
1920 u8 type;
1921 u8 enable;
1922@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
1923 u8 _rsv[2];
1924 };
1925
1926+struct mt7915_tm_mu_rx_aid {
1927+ __le32 band;
1928+ __le16 aid;
1929+};
1930+
1931 struct mt7915_tm_cmd {
1932 u8 testmode_en;
1933 u8 param_idx;
1934@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
1935 struct mt7915_tm_slot_time slot;
1936 struct mt7915_tm_clean_txq clean;
1937 struct mt7915_tm_cfg cfg;
1938+ struct mt7915_tm_mu_rx_aid rx_aid;
1939 u8 test[72];
1940 } param;
1941 } __packed;
1942@@ -109,6 +117,16 @@ enum {
1943 TAM_ARB_OP_MODE_FORCE_SU = 5,
1944 };
1945
1946+enum {
1947+ TM_CBW_20MHZ,
1948+ TM_CBW_40MHZ,
1949+ TM_CBW_80MHZ,
1950+ TM_CBW_10MHZ,
1951+ TM_CBW_5MHZ,
1952+ TM_CBW_160MHZ,
1953+ TM_CBW_8080MHZ,
1954+};
1955+
1956 struct mt7915_tm_rx_stat_band {
1957 u8 category;
1958
1959@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
1960 __le16 mdrdy_cnt_ofdm;
1961 };
1962
1963+struct mt7915_tm_muru_comm {
1964+ u8 ppdu_format;
1965+ u8 sch_type;
1966+ u8 band;
1967+ u8 wmm_idx;
1968+ u8 spe_idx;
1969+ u8 proc_type;
1970+};
1971+
1972+struct mt7915_tm_muru_dl_usr {
1973+ __le16 wlan_idx;
1974+ u8 ru_alloc_seg;
1975+ u8 ru_idx;
1976+ u8 ldpc;
1977+ u8 nss;
1978+ u8 mcs;
1979+ u8 mu_group_idx;
1980+ u8 vht_groud_id;
1981+ u8 vht_up;
1982+ u8 he_start_stream;
1983+ u8 he_mu_spatial;
1984+ u8 ack_policy;
1985+ __le16 tx_power_alpha;
1986+};
1987+
1988+struct mt7915_tm_muru_dl {
1989+ u8 user_num;
1990+ u8 tx_mode;
1991+ u8 bw;
1992+ u8 gi;
1993+ u8 ltf;
1994+ /* sigB */
1995+ u8 mcs;
1996+ u8 dcm;
1997+ u8 cmprs;
1998+
1999+ u8 tx_power;
2000+ u8 ru[8];
2001+ u8 c26[2];
2002+ u8 ack_policy;
2003+
2004+ struct mt7915_tm_muru_dl_usr usr[16];
2005+};
2006+
2007+struct mt7915_tm_muru_ul_usr {
2008+ __le16 wlan_idx;
2009+ u8 ru_alloc;
2010+ u8 ru_idx;
2011+ u8 ldpc;
2012+ u8 nss;
2013+ u8 mcs;
2014+ u8 target_rssi;
2015+ __le32 trig_pkt_size;
2016+};
2017+
2018+struct mt7915_tm_muru_ul {
2019+ u8 user_num;
2020+
2021+ /* UL TX */
2022+ u8 trig_type;
2023+ __le16 trig_cnt;
2024+ __le16 trig_intv;
2025+ u8 bw;
2026+ u8 gi_ltf;
2027+ __le16 ul_len;
2028+ u8 pad;
2029+ u8 trig_ta[ETH_ALEN];
2030+ u8 ru[8];
2031+ u8 c26[2];
2032+
2033+ struct mt7915_tm_muru_ul_usr usr[16];
2034+ /* HE TB RX Debug */
2035+ __le32 rx_hetb_nonsf_en_bitmap;
2036+ __le32 rx_hetb_cfg[2];
2037+
2038+ /* DL TX */
2039+ u8 ba_type;
2040+};
2041+
2042+struct mt7915_tm_muru {
2043+ __le32 cfg_comm;
2044+ __le32 cfg_dl;
2045+ __le32 cfg_ul;
2046+
2047+ struct mt7915_tm_muru_comm comm;
2048+ struct mt7915_tm_muru_dl dl;
2049+ struct mt7915_tm_muru_ul ul;
2050+};
2051+
2052+#define MURU_PPDU_HE_MU BIT(3)
2053+
2054+/* Common Config */
2055+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2056+/* #define MURU_COMM_SCH_TYPE BIT(1) */
2057+/* #define MURU_COMM_BAND BIT(2) */
2058+/* #define MURU_COMM_WMM BIT(3) */
2059+/* #define MURU_COMM_SPE_IDX BIT(4) */
2060+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2061+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
2062+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
2063+/* DL Config */
2064+#define MURU_DL_BW BIT(0)
2065+#define MURU_DL_GI BIT(1)
2066+#define MURU_DL_TX_MODE BIT(2)
2067+#define MURU_DL_TONE_PLAN BIT(3)
2068+#define MURU_DL_USER_CNT BIT(4)
2069+#define MURU_DL_LTF BIT(5)
2070+#define MURU_DL_SIGB_MCS BIT(6)
2071+#define MURU_DL_SIGB_DCM BIT(7)
2072+#define MURU_DL_SIGB_CMPRS BIT(8)
2073+#define MURU_DL_ACK_POLICY BIT(9)
2074+#define MURU_DL_TXPOWER BIT(10)
2075+/* DL Per User Config */
2076+#define MURU_DL_USER_WLAN_ID BIT(16)
2077+#define MURU_DL_USER_COD BIT(17)
2078+#define MURU_DL_USER_MCS BIT(18)
2079+#define MURU_DL_USER_NSS BIT(19)
2080+#define MURU_DL_USER_RU_ALLOC BIT(20)
2081+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2082+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2083+#define MURU_DL_USER_ACK_POLICY BIT(23)
2084+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2085+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2086+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2087+
2088+#define MAX_PHASE_GROUP_NUM 9
2089+
2090+struct mt7915_tm_txbf_phase {
2091+ u8 status;
2092+ struct {
2093+ u8 r0_uh;
2094+ u8 r0_h;
2095+ u8 r0_m;
2096+ u8 r0_l;
2097+ u8 r0_ul;
2098+ u8 r1_uh;
2099+ u8 r1_h;
2100+ u8 r1_m;
2101+ u8 r1_l;
2102+ u8 r1_ul;
2103+ u8 r2_uh;
2104+ u8 r2_h;
2105+ u8 r2_m;
2106+ u8 r2_l;
2107+ u8 r2_ul;
2108+ u8 r3_uh;
2109+ u8 r3_h;
2110+ u8 r3_m;
2111+ u8 r3_l;
2112+ u8 r3_ul;
2113+ u8 r2_uh_sx2;
2114+ u8 r2_h_sx2;
2115+ u8 r2_m_sx2;
2116+ u8 r2_l_sx2;
2117+ u8 r2_ul_sx2;
2118+ u8 r3_uh_sx2;
2119+ u8 r3_h_sx2;
2120+ u8 r3_m_sx2;
2121+ u8 r3_l_sx2;
2122+ u8 r3_ul_sx2;
2123+ u8 m_t0_h;
2124+ u8 m_t1_h;
2125+ u8 m_t2_h;
2126+ u8 m_t2_h_sx2;
2127+ u8 r0_reserved;
2128+ u8 r1_reserved;
2129+ u8 r2_reserved;
2130+ u8 r3_reserved;
2131+ u8 r2_sx2_reserved;
2132+ u8 r3_sx2_reserved;
2133+ } phase;
2134+};
2135+
2136+struct mt7915_tm_pfmu_tag1 {
2137+ __le32 pfmu_idx:10;
2138+ __le32 ebf:1;
2139+ __le32 data_bw:2;
2140+ __le32 lm:2;
2141+ __le32 is_mu:1;
2142+ __le32 nr:3, nc:3;
2143+ __le32 codebook:2;
2144+ __le32 ngroup:2;
2145+ __le32 _rsv:2;
2146+ __le32 invalid_prof:1;
2147+ __le32 rmsd:3;
2148+
2149+ __le32 col_id1:6, row_id1:10;
2150+ __le32 col_id2:6, row_id2:10;
2151+ __le32 col_id3:6, row_id3:10;
2152+ __le32 col_id4:6, row_id4:10;
2153+
2154+ __le32 ru_start_id:7;
2155+ __le32 _rsv1:1;
2156+ __le32 ru_end_id:7;
2157+ __le32 _rsv2:1;
2158+ __le32 mob_cal_en:1;
2159+ __le32 _rsv3:15;
2160+
2161+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2162+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2163+
2164+ __le32 _rsv4;
2165+} __packed;
2166+
2167+struct mt7915_tm_pfmu_tag2 {
2168+ __le32 smart_ant:24;
2169+ __le32 se_idx:5;
2170+ __le32 _rsv:3;
2171+
2172+ __le32 _rsv1:8;
2173+ __le32 rmsd_thres:3;
2174+ __le32 _rsv2:5;
2175+ __le32 ibf_timeout:8;
2176+ __le32 _rsv3:8;
2177+
2178+ __le32 _rsv4:16;
2179+ __le32 ibf_data_bw:2;
2180+ __le32 ibf_nc:3;
2181+ __le32 ibf_nr:3;
2182+ __le32 ibf_ru:8;
2183+
2184+ __le32 mob_delta_t:8;
2185+ __le32 mob_lq_result:7;
2186+ __le32 _rsv5:1;
2187+ __le32 _rsv6:16;
2188+
2189+ __le32 _rsv7;
2190+} __packed;
2191+
2192+struct mt7915_tm_pfmu_tag {
2193+ struct mt7915_tm_pfmu_tag1 t1;
2194+ struct mt7915_tm_pfmu_tag2 t2;
2195+};
2196+
2197+struct mt7915_tm_pfmu_data {
2198+ __le16 subc_idx;
2199+ __le16 phi11;
2200+ __le16 phi21;
2201+ __le16 phi31;
2202+};
2203+
2204+struct mt7915_tm_ibf_cal_info {
2205+ u8 format_id;
2206+ u8 group_l_m_n;
2207+ u8 group;
2208+ bool sx2;
2209+ u8 status;
2210+ u8 cal_type;
2211+ u8 _rsv[2];
2212+ u8 buf[1000];
2213+} __packed;
2214+
2215+enum {
2216+ IBF_PHASE_CAL_UNSPEC,
2217+ IBF_PHASE_CAL_NORMAL,
2218+ IBF_PHASE_CAL_VERIFY,
2219+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2220+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2221+};
2222+
2223 #endif
2224diff --git a/testmode.c b/testmode.c
developer60a3d662023-02-07 15:24:34 +08002225index 1d0d5d30..7a9ed543 100644
developer6caa5e22022-06-16 13:33:13 +08002226--- a/testmode.c
2227+++ b/testmode.c
developer072c5612022-07-15 18:30:03 +08002228@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer6caa5e22022-06-16 13:33:13 +08002229 };
2230 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2231
2232-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2233+static void
2234+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf79ad452022-07-12 11:37:54 +08002235+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2236+ u16 limit)
developer6caa5e22022-06-16 13:33:13 +08002237 {
2238 struct mt76_testmode_data *td = &phy->test;
2239 struct mt76_dev *dev = phy->dev;
2240- struct mt76_wcid *wcid = &dev->global_wcid;
2241- struct sk_buff *skb = td->tx_skb;
2242- struct mt76_queue *q;
2243- u16 tx_queued_limit;
2244- int qid;
2245-
2246- if (!skb || !td->tx_pending)
2247- return;
2248+ u16 count = limit;
2249
2250- qid = skb_get_queue_mapping(skb);
2251- q = phy->q_tx[qid];
2252-
2253- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2254-
2255- spin_lock_bh(&q->lock);
2256-
2257- while (td->tx_pending > 0 &&
2258- td->tx_queued - td->tx_done < tx_queued_limit &&
2259+ while (td->tx_pending > 0 && count &&
2260 q->queued < q->ndesc / 2) {
2261 int ret;
2262
developer072c5612022-07-15 18:30:03 +08002263@@ -57,13 +45,65 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002264 if (ret < 0)
2265 break;
2266
2267+ count--;
2268 td->tx_pending--;
2269 td->tx_queued++;
2270+
2271+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
2272+ if (td->tx_queued - td->tx_done >= limit)
2273+ break;
2274 }
2275
2276 dev->queue_ops->kick(dev, q);
2277+}
2278+
2279+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2280+{
2281+ struct mt76_testmode_data *td = &phy->test;
2282+ struct mt76_testmode_entry_data *ed;
2283+ struct mt76_queue *q;
2284+ int qid;
2285+ u16 tx_queued_limit;
2286+ u32 remain;
2287+ bool is_mu;
2288+
2289+ if (!td->tx_pending)
2290+ return;
2291+
2292+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2293+ tx_queued_limit = 100;
2294+
2295+ if (!td->aid) {
2296+ qid = skb_get_queue_mapping(td->tx_skb);
2297+ q = phy->q_tx[qid];
2298+ spin_lock_bh(&q->lock);
2299+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developerf79ad452022-07-12 11:37:54 +08002300+ td->tx_skb, q, qid, tx_queued_limit);
developer6caa5e22022-06-16 13:33:13 +08002301+ spin_unlock_bh(&q->lock);
2302+
2303+ return;
2304+ }
2305+
2306+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2307+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2308+ qid = skb_get_queue_mapping(ed->tx_skb);
2309+ q = phy->q_tx[qid];
2310+
2311+ spin_lock_bh(&q->lock);
2312+
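+	/* frames still owed by the current entry: entries take turns queueing
+	 * tx_count frames each (a single frame per turn in HE MU mode)
+	 */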
2313+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2314+ if (remain < tx_queued_limit)
2315+ tx_queued_limit = remain;
2316+
developerf79ad452022-07-12 11:37:54 +08002317+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer6caa5e22022-06-16 13:33:13 +08002318+
2319+ if (td->tx_pending % td->tx_count == 0 || is_mu)
2320+ td->cur_entry = list_next_entry(td->cur_entry, list);
2321
2322 spin_unlock_bh(&q->lock);
2323+
2324+ if (is_mu && td->tx_pending)
2325+ mt76_worker_schedule(&phy->dev->tx_worker);
2326 }
2327
2328 static u32
developer072c5612022-07-15 18:30:03 +08002329@@ -89,15 +129,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer6caa5e22022-06-16 13:33:13 +08002330 }
2331
2332 static void
2333-mt76_testmode_free_skb(struct mt76_phy *phy)
2334+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2335+{
2336+ if (!(*tx_skb))
2337+ return;
2338+
2339+ dev_kfree_skb(*tx_skb);
2340+ *tx_skb = NULL;
2341+}
2342+
2343+static void
2344+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2345 {
2346 struct mt76_testmode_data *td = &phy->test;
2347+ struct mt76_testmode_entry_data *ed = &td->ed;
2348+ struct mt76_wcid *wcid;
2349+
2350+ mt76_testmode_free_skb(&ed->tx_skb);
2351
2352- dev_kfree_skb(td->tx_skb);
2353- td->tx_skb = NULL;
2354+ mt76_tm_for_each_entry(phy, wcid, ed)
2355+ mt76_testmode_free_skb(&ed->tx_skb);
2356 }
2357
2358-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2359+static int
2360+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2361+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2362 {
2363 #define MT_TXP_MAX_LEN 4095
2364 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developerf7a3ca32022-09-01 14:44:55 +08002365@@ -118,7 +174,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002366 nfrags = len / MT_TXP_MAX_LEN;
2367 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2368
2369- if (len > IEEE80211_MAX_FRAME_LEN)
2370+ if (len > IEEE80211_MAX_FRAME_LEN ||
2371+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2372 fc |= IEEE80211_STYPE_QOS_DATA;
2373
2374 head = alloc_skb(head_len, GFP_KERNEL);
developerf7a3ca32022-09-01 14:44:55 +08002375@@ -127,9 +184,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002376
developer072c5612022-07-15 18:30:03 +08002377 hdr = __skb_put_zero(head, sizeof(*hdr));
developer6caa5e22022-06-16 13:33:13 +08002378 hdr->frame_control = cpu_to_le16(fc);
2379- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2380- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2381- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2382+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2383+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2384+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2385 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developer072c5612022-07-15 18:30:03 +08002386 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2387 head_len - sizeof(*hdr));
developerf7a3ca32022-09-01 14:44:55 +08002388@@ -153,7 +210,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002389
2390 frag = alloc_skb(frag_len, GFP_KERNEL);
2391 if (!frag) {
2392- mt76_testmode_free_skb(phy);
2393+ mt76_testmode_free_skb(tx_skb);
2394 dev_kfree_skb(head);
2395 return -ENOMEM;
2396 }
developerf7a3ca32022-09-01 14:44:55 +08002397@@ -166,15 +223,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002398 frag_tail = &(*frag_tail)->next;
2399 }
2400
2401- mt76_testmode_free_skb(phy);
2402- td->tx_skb = head;
2403+ mt76_testmode_free_skb(tx_skb);
2404+ *tx_skb = head;
2405
2406 return 0;
2407 }
2408-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2409
2410-static int
2411-mt76_testmode_tx_init(struct mt76_phy *phy)
2412+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2413+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2414 {
2415 struct mt76_testmode_data *td = &phy->test;
2416 struct ieee80211_tx_info *info;
developerf7a3ca32022-09-01 14:44:55 +08002417@@ -182,7 +238,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002418 u8 max_nss = hweight8(phy->antenna_mask);
2419 int ret;
2420
2421- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2422+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2423 if (ret)
2424 return ret;
2425
developerf7a3ca32022-09-01 14:44:55 +08002426@@ -192,7 +248,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002427 if (td->tx_antenna_mask)
2428 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2429
2430- info = IEEE80211_SKB_CB(td->tx_skb);
2431+ info = IEEE80211_SKB_CB(*tx_skb);
2432 rate = &info->control.rates[0];
2433 rate->count = 1;
2434 rate->idx = td->tx_rate_idx;
developerf7a3ca32022-09-01 14:44:55 +08002435@@ -264,6 +320,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002436 out:
2437 return 0;
2438 }
2439+EXPORT_SYMBOL(mt76_testmode_init_skb);
2440+
2441+static int
2442+mt76_testmode_tx_init(struct mt76_phy *phy)
2443+{
2444+ struct mt76_testmode_entry_data *ed;
2445+ struct mt76_wcid *wcid;
2446+
2447+ mt76_tm_for_each_entry(phy, wcid, ed) {
2448+ int ret;
2449+
2450+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2451+ &ed->tx_skb, ed->addr);
2452+ if (ret)
2453+ return ret;
2454+ }
2455+
2456+ return 0;
2457+}
2458
2459 static void
2460 mt76_testmode_tx_start(struct mt76_phy *phy)
developerf7a3ca32022-09-01 14:44:55 +08002461@@ -274,6 +349,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002462 td->tx_queued = 0;
2463 td->tx_done = 0;
2464 td->tx_pending = td->tx_count;
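+	/* HE MU queues a single frame per entry; the requested count is handed
+	 * to the firmware (see mt7915_tm_set_muru_pkt_cnt()) and the result is
+	 * read back from the MIB in dump_stats
+	 */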
2465+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2466+ td->tx_pending = 1;
2467+ if (td->entry_num) {
2468+ td->tx_pending *= td->entry_num;
2469+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2470+ struct mt76_wcid, list);
2471+ }
2472+
2473 mt76_worker_schedule(&dev->tx_worker);
2474 }
2475
developerf7a3ca32022-09-01 14:44:55 +08002476@@ -292,7 +375,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002477 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2478 MT76_TM_TIMEOUT * HZ);
2479
2480- mt76_testmode_free_skb(phy);
2481+ mt76_testmode_free_skb_all(phy);
2482 }
2483
2484 static inline void
developerf7a3ca32022-09-01 14:44:55 +08002485@@ -323,6 +406,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002486 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2487 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2488 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2489+
2490+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2491 }
2492
2493 static int
developerf7a3ca32022-09-01 14:44:55 +08002494@@ -332,8 +417,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer6caa5e22022-06-16 13:33:13 +08002495 struct mt76_dev *dev = phy->dev;
2496 int err;
2497
2498- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2499+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2500+ /* MU needs to clean hwq for free done event */
2501+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2502+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2503 mt76_testmode_tx_stop(phy);
2504+ }
2505
2506 if (state == MT76_TM_STATE_TX_FRAMES) {
2507 err = mt76_testmode_tx_init(phy);
developerf7a3ca32022-09-01 14:44:55 +08002508@@ -403,6 +492,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer6caa5e22022-06-16 13:33:13 +08002509 return 0;
2510 }
2511
2512+static int
2513+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2514+{
2515+ struct mt76_dev *dev = phy->dev;
2516+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2517+ u32 offset = 0;
2518+ int err = -EINVAL;
2519+
2520+ if (!dev->test_ops->set_eeprom)
2521+ return -EOPNOTSUPP;
2522+
2523+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2524+ 0, MT76_TM_EEPROM_ACTION_MAX))
2525+ goto out;
2526+
2527+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2528+ struct nlattr *cur;
2529+ int rem, idx = 0;
2530+
2531+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2532+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2533+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2534+ goto out;
2535+
2536+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2537+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2538+ goto out;
2539+
2540+ val[idx++] = nla_get_u8(cur);
2541+ }
2542+ }
2543+
2544+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2545+
2546+out:
2547+ return err;
2548+}
2549+
2550 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2551 void *data, int len)
2552 {
developerf7a3ca32022-09-01 14:44:55 +08002553@@ -426,6 +553,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002554
2555 mutex_lock(&dev->mutex);
2556
2557+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2558+ err = mt76_testmode_set_eeprom(phy, tb);
2559+ goto out;
2560+ }
2561+
2562 if (tb[MT76_TM_ATTR_RESET]) {
2563 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2564 memset(td, 0, sizeof(*td));
developer28b11e22022-09-05 19:09:45 +08002565@@ -452,7 +584,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002566 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2567 &td->tx_duty_cycle, 0, 99) ||
2568 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2569- &td->tx_power_control, 0, 1))
2570+ &td->tx_power_control, 0, 1) ||
2571+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2572+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2573+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2574 goto out;
2575
2576 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developer28b11e22022-09-05 19:09:45 +08002577@@ -484,8 +619,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002578
2579 if (tb[MT76_TM_ATTR_TX_POWER]) {
2580 struct nlattr *cur;
2581- int idx = 0;
2582- int rem;
2583+ int rem, idx = 0;
2584
2585 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2586 if (nla_len(cur) != 1 ||
developer28b11e22022-09-05 19:09:45 +08002587@@ -505,11 +639,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002588 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2589 goto out;
2590
2591- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2592+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2593+ }
2594+ }
2595+
2596+ if (tb[MT76_TM_ATTR_CFG]) {
2597+ struct nlattr *cur;
2598+ int rem, idx = 0;
2599+
2600+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2601+ if (nla_len(cur) != 1 || idx >= 2)
2602+ goto out;
2603+
2604+ if (idx == 0)
2605+ td->cfg.type = nla_get_u8(cur);
2606+ else
2607+ td->cfg.enable = nla_get_u8(cur);
2608 idx++;
2609 }
2610 }
2611
2612+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2613+ struct nlattr *cur;
2614+ int rem, idx = 0;
2615+
2616+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2617+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
2618+ 0, MT76_TM_TXBF_ACT_MAX))
2619+ goto out;
2620+
2621+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2622+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2623+ if (nla_len(cur) != 2 ||
2624+ idx >= ARRAY_SIZE(td->txbf_param))
2625+ goto out;
2626+
2627+ td->txbf_param[idx++] = nla_get_u16(cur);
2628+ }
2629+ }
2630+
2631 if (dev->test_ops->set_params) {
2632 err = dev->test_ops->set_params(phy, tb, state);
2633 if (err)
developer28b11e22022-09-05 19:09:45 +08002634@@ -574,6 +742,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002635 struct mt76_phy *phy = hw->priv;
2636 struct mt76_dev *dev = phy->dev;
2637 struct mt76_testmode_data *td = &phy->test;
2638+ struct mt76_testmode_entry_data *ed = &td->ed;
2639 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2640 int err = 0;
2641 void *a;
developer28b11e22022-09-05 19:09:45 +08002642@@ -606,6 +775,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002643 goto out;
2644 }
2645
2646+ if (tb[MT76_TM_ATTR_AID]) {
2647+ struct mt76_wcid *wcid;
2648+ u8 aid;
2649+
2650+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2651+ if (err)
2652+ goto out;
2653+
2654+ mt76_tm_for_each_entry(phy, wcid, ed)
2655+ if (ed->aid == aid)
2656+ ed = mt76_testmode_entry_data(phy, wcid);
2657+ }
2658+
2659 mt76_testmode_init_defaults(phy);
2660
2661 err = -EMSGSIZE;
developer28b11e22022-09-05 19:09:45 +08002662@@ -618,12 +800,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002663 goto out;
2664
2665 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2666- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2667 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2668- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2669- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2670 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2671- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2672 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2673 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2674 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developer28b11e22022-09-05 19:09:45 +08002675@@ -643,6 +821,15 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002676 nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2677 goto out;
2678
2679+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2680+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2681+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2682+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2683+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2684+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2685+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
2686+ goto out;
2687+
2688 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
2689 a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
2690 if (!a)
2691diff --git a/testmode.h b/testmode.h
developer60a3d662023-02-07 15:24:34 +08002692index 89613266..57949f2b 100644
developer6caa5e22022-06-16 13:33:13 +08002693--- a/testmode.h
2694+++ b/testmode.h
2695@@ -6,6 +6,8 @@
2696 #define __MT76_TESTMODE_H
2697
2698 #define MT76_TM_TIMEOUT 10
2699+#define MT76_TM_MAX_ENTRY_NUM 16
2700+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2701
2702 /**
2703 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2704@@ -47,6 +49,15 @@
2705 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2706 *
2707 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2708+ *
2709+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
2710+ * (u8, see &enum mt76_testmode_eeprom_action)
2711+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2712+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
2713+ * (nested, u8 attrs)
2714+ *
2715+ * @MT76_TM_ATTR_CFG: configure a testmode rf feature (nested, see &enum mt76_testmode_cfg)
2716+ *
2717 */
2718 enum mt76_testmode_attr {
2719 MT76_TM_ATTR_UNSPEC,
2720@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2721 MT76_TM_ATTR_DRV_DATA,
2722
2723 MT76_TM_ATTR_MAC_ADDRS,
2724+ MT76_TM_ATTR_AID,
2725+ MT76_TM_ATTR_RU_ALLOC,
2726+ MT76_TM_ATTR_RU_IDX,
2727+
2728+ MT76_TM_ATTR_EEPROM_ACTION,
2729+ MT76_TM_ATTR_EEPROM_OFFSET,
2730+ MT76_TM_ATTR_EEPROM_VAL,
2731+
2732+ MT76_TM_ATTR_CFG,
2733+ MT76_TM_ATTR_TXBF_ACT,
2734+ MT76_TM_ATTR_TXBF_PARAM,
2735
2736 /* keep last */
2737 NUM_MT76_TM_ATTRS,
2738@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2739
2740 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2741
2742+/**
2743+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2744+ *
2745+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values in a specific
2746+ * eeprom data block
2747+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2748+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2749+ */
2750+enum mt76_testmode_eeprom_action {
2751+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2752+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2753+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2754+
2755+ /* keep last */
2756+ NUM_MT76_TM_EEPROM_ACTION,
2757+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2758+};
2759+
2760+/**
2761+ * enum mt76_testmode_cfg - rf feature to configure
2762+ *
2763+ * @MT76_TM_CFG_TSSI: TSSI (transmit signal strength indication)
2764+ * @MT76_TM_CFG_DPD: DPD (digital pre-distortion)
2765+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate tx power offset
2766+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
2767+ */
2768+enum mt76_testmode_cfg {
2769+ MT76_TM_CFG_TSSI,
2770+ MT76_TM_CFG_DPD,
2771+ MT76_TM_CFG_RATE_POWER_OFFSET,
2772+ MT76_TM_CFG_THERMAL_COMP,
2773+
2774+ /* keep last */
2775+ NUM_MT76_TM_CFG,
2776+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2777+};
2778+
2779+enum mt76_testmode_txbf_act {
2780+ MT76_TM_TXBF_ACT_INIT,
2781+ MT76_TM_TXBF_ACT_UPDATE_CH,
2782+ MT76_TM_TXBF_ACT_PHASE_COMP,
2783+ MT76_TM_TXBF_ACT_TX_PREP,
2784+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2785+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2786+ MT76_TM_TXBF_ACT_PHASE_CAL,
2787+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2788+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2789+
2790+ /* keep last */
2791+ NUM_MT76_TM_TXBF_ACT,
2792+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2793+};
2794+
2795 #endif
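The EEPROM attributes above form a small userspace-visible API: pick an action, give a block-aligned offset, and pass the 16 data bytes as a nest of single-byte attributes, which is what mt76_testmode_set_eeprom() expects on the kernel side. A minimal libnl sketch, illustrative only: fill_eeprom_block() and the surrounding message setup are assumptions, not part of this patch, and it presumes the MT76_TM_* definitions from testmode.h are visible to the tool build (as they are for tools/fields.c).

#include <stdint.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include "testmode.h"

/* Pack one 16-byte block update into an NL80211_ATTR_TESTDATA payload.
 * "msg" is assumed to already have the testdata nest open; error handling
 * is omitted for brevity.
 */
static void fill_eeprom_block(struct nl_msg *msg, uint32_t offset,
			      const uint8_t *block)
{
	struct nlattr *val;
	int i;

	/* offset must be a multiple of MT76_TM_EEPROM_BLOCK_SIZE (16) */
	nla_put_u8(msg, MT76_TM_ATTR_EEPROM_ACTION,
		   MT76_TM_EEPROM_ACTION_UPDATE_DATA);
	nla_put_u32(msg, MT76_TM_ATTR_EEPROM_OFFSET, offset);

	/* MT76_TM_ATTR_EEPROM_VAL is a nest of one-byte attributes */
	val = nla_nest_start(msg, MT76_TM_ATTR_EEPROM_VAL);
	for (i = 0; i < MT76_TM_EEPROM_BLOCK_SIZE; i++)
		nla_put_u8(msg, i, block[i]);
	nla_nest_end(msg, val);
}

Once the relevant blocks have been updated this way, a message carrying only MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE pushes the modified image to the firmware, and MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE writes it back to the efuse.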
2796diff --git a/tools/fields.c b/tools/fields.c
developer60a3d662023-02-07 15:24:34 +08002797index e3f69089..6e36ab27 100644
developer6caa5e22022-06-16 13:33:13 +08002798--- a/tools/fields.c
2799+++ b/tools/fields.c
2800@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2801 [MT76_TM_STATE_IDLE] = "idle",
2802 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2803 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2804+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2805 };
2806
2807 static const char * const testmode_tx_mode[] = {
2808@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2809 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2810 }
2811
2812+static bool parse_mac(const struct tm_field *field, int idx,
2813+ struct nl_msg *msg, const char *val)
2814+{
2815+#define ETH_ALEN 6
2816+ bool ret = true;
2817+ char *str, *cur, *ap;
2818+ void *a;
2819+
2820+ ap = str = strdup(val);
2821+
2822+ a = nla_nest_start(msg, idx);
2823+
2824+ idx = 0;
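+	/* val is a comma-separated list of colon-separated MAC addresses
+	 * (addr1,addr2,addr3); each one becomes a nested ETH_ALEN-byte attribute
+	 */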
2825+ while ((cur = strsep(&ap, ",")) != NULL) {
2826+ unsigned char addr[ETH_ALEN];
2827+ char *val, *tmp = cur;
2828+ int i = 0;
2829+
2830+ while ((val = strsep(&tmp, ":")) != NULL) {
2831+ if (i >= ETH_ALEN)
2832+ break;
2833+
2834+ addr[i++] = strtoul(val, NULL, 16);
2835+ }
2836+
2837+ nla_put(msg, idx, ETH_ALEN, addr);
2838+
2839+ idx++;
2840+ }
2841+
2842+ nla_nest_end(msg, a);
2843+
2844+ free(str);
2845+
2846+ return ret;
2847+}
2848+
2849+static void print_mac(const struct tm_field *field, struct nlattr *attr)
2850+{
2851+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
2852+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
2853+ unsigned char addr[3][6];
2854+ struct nlattr *cur;
2855+ int idx = 0;
2856+ int rem;
2857+
2858+ nla_for_each_nested(cur, attr, rem) {
2859+ if (nla_len(cur) != 6)
2860+ continue;
2861+ memcpy(addr[idx++], nla_data(cur), 6);
2862+ }
2863+
2864+ printf("" MACSTR "," MACSTR "," MACSTR "",
2865+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
2866+
2867+ return;
2868+}
2869
2870 #define FIELD_GENERIC(_field, _name, ...) \
2871 [FIELD_NAME(_field)] = { \
2872@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2873 ##__VA_ARGS__ \
2874 )
2875
2876+#define FIELD_MAC(_field, _name) \
2877+ [FIELD_NAME(_field)] = { \
2878+ .name = _name, \
2879+ .parse = parse_mac, \
2880+ .print = print_mac \
2881+ }
2882+
2883 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
2884 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
2885 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
2886@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
2887 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
2888 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
2889 FIELD(u8, TX_LTF, "tx_ltf"),
2890+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
2891+ FIELD(u32, TX_IPG, "tx_ipg"),
2892+ FIELD(u32, TX_TIME, "tx_time"),
2893 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
2894 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
2895 FIELD(u8, TX_ANTENNA, "tx_antenna"),
2896+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
2897 FIELD(u32, FREQ_OFFSET, "freq_offset"),
2898+ FIELD(u8, AID, "aid"),
2899+ FIELD(u8, RU_ALLOC, "ru_alloc"),
2900+ FIELD(u8, RU_IDX, "ru_idx"),
2901+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
2902 FIELD_NESTED_RO(STATS, stats, "",
2903 .print_extra = print_extra_stats),
2904 };
2905@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
2906 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
2907 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
2908 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
2909+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
2910+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
2911+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
2912 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
2913 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
2914+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
2915 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
2916+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
2917+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
2918+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
2919 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
2920 };
2921
2922diff --git a/tx.c b/tx.c
developer60a3d662023-02-07 15:24:34 +08002923index 1f309d05..6d55566f 100644
developer6caa5e22022-06-16 13:33:13 +08002924--- a/tx.c
2925+++ b/tx.c
developer17bb0a82022-12-13 15:52:04 +08002926@@ -250,8 +250,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
developer6caa5e22022-06-16 13:33:13 +08002927 if (mt76_is_testmode_skb(dev, skb, &hw)) {
2928 struct mt76_phy *phy = hw->priv;
2929
2930- if (skb == phy->test.tx_skb)
2931- phy->test.tx_done++;
2932+ phy->test.tx_done++;
2933 if (phy->test.tx_queued == phy->test.tx_done)
2934 wake_up(&dev->tx_wait);
2935
2936--
developer3f784572023-01-31 15:21:28 +080029372.18.0
developer6caa5e22022-06-16 13:33:13 +08002938