From c311d022536c9c6be72f8e4a134f9dbaed13cd6d Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Thu, 21 Apr 2022 15:43:19 +0800
Subject: [PATCH 1010/1014] wifi: mt76: testmode: additional supports

Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
Signed-off-by: StanleyYP Wang <StanleyYP.Wang@mediatek.com>
---
 dma.c             |    3 +-
 mac80211.c        |   12 +
 mt76.h            |  108 +++-
 mt76_connac_mcu.c |    4 +
 mt76_connac_mcu.h |    2 +
 mt7915/eeprom.c   |    2 +-
 mt7915/init.c     |    2 +-
 mt7915/mac.c      |   39 +-
 mt7915/main.c     |    2 +-
 mt7915/mcu.c      |   22 +-
 mt7915/mcu.h      |   29 +-
 mt7915/mmio.c     |    2 +
 mt7915/mt7915.h   |   16 +-
 mt7915/regs.h     |    3 +
 mt7915/testmode.c | 1221 ++++++++++++++++++++++++++++++++++++++++++---
 mt7915/testmode.h |  278 +++++++++++
 testmode.c        |  282 +++++++++--
 testmode.h        |   75 +++
 tools/fields.c    |   84 +++-
 tx.c              |    3 +-
 20 files changed, 2023 insertions(+), 166 deletions(-)

31diff --git a/dma.c b/dma.c
developer7af0f762023-05-22 15:16:16 +080032index c9d2671..fc92e39 100644
developer6caa5e22022-06-16 13:33:13 +080033--- a/dma.c
34+++ b/dma.c
developer8effbd32023-04-17 15:57:28 +080035@@ -574,8 +574,7 @@ free:
developer6caa5e22022-06-16 13:33:13 +080036 if (mt76_is_testmode_skb(dev, skb, &hw)) {
37 struct mt76_phy *phy = hw->priv;
38
39- if (tx_info.skb == phy->test.tx_skb)
40- phy->test.tx_done--;
41+ phy->test.tx_done--;
42 }
43 #endif
44
45diff --git a/mac80211.c b/mac80211.c
developer7af0f762023-05-22 15:16:16 +080046index 115bb05..75e2ffe 100644
developer6caa5e22022-06-16 13:33:13 +080047--- a/mac80211.c
48+++ b/mac80211.c
developer8effbd32023-04-17 15:57:28 +080049@@ -55,6 +55,13 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer6caa5e22022-06-16 13:33:13 +080050 CHAN5G(60, 5300),
51 CHAN5G(64, 5320),
52
53+ CHAN5G(68, 5340),
54+ CHAN5G(80, 5400),
55+ CHAN5G(84, 5420),
56+ CHAN5G(88, 5440),
57+ CHAN5G(92, 5460),
58+ CHAN5G(96, 5480),
59+
60 CHAN5G(100, 5500),
61 CHAN5G(104, 5520),
62 CHAN5G(108, 5540),
developer8effbd32023-04-17 15:57:28 +080063@@ -75,6 +82,11 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
developer6caa5e22022-06-16 13:33:13 +080064 CHAN5G(165, 5825),
65 CHAN5G(169, 5845),
66 CHAN5G(173, 5865),
67+
68+ CHAN5G(184, 4920),
69+ CHAN5G(188, 4940),
70+ CHAN5G(192, 4960),
71+ CHAN5G(196, 4980),
72 };
73
74 static const struct ieee80211_channel mt76_channels_6ghz[] = {
75diff --git a/mt76.h b/mt76.h
developer7af0f762023-05-22 15:16:16 +080076index 492fe42..3191626 100644
developer6caa5e22022-06-16 13:33:13 +080077--- a/mt76.h
78+++ b/mt76.h
developer8effbd32023-04-17 15:57:28 +080079@@ -646,6 +646,21 @@ struct mt76_testmode_ops {
developer6caa5e22022-06-16 13:33:13 +080080 int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
81 enum mt76_testmode_state new_state);
82 int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
83+ int (*set_eeprom)(struct mt76_phy *phy, u32 offset, u8 *val, u8 action);
84+};
85+
86+struct mt76_testmode_entry_data {
87+ struct sk_buff *tx_skb;
88+
89+ u16 tx_mpdu_len;
90+ u8 tx_rate_idx;
91+ u8 tx_rate_nss;
92+ u8 tx_rate_ldpc;
93+
94+ u8 addr[3][ETH_ALEN];
95+ u8 aid;
96+ u8 ru_alloc;
97+ u8 ru_idx;
98 };
99
100 #define MT_TM_FW_RX_COUNT BIT(0)
developer8effbd32023-04-17 15:57:28 +0800101@@ -654,16 +669,11 @@ struct mt76_testmode_data {
developer6caa5e22022-06-16 13:33:13 +0800102 enum mt76_testmode_state state;
103
104 u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
105- struct sk_buff *tx_skb;
106
107 u32 tx_count;
108- u16 tx_mpdu_len;
109
110 u8 tx_rate_mode;
111- u8 tx_rate_idx;
112- u8 tx_rate_nss;
113 u8 tx_rate_sgi;
114- u8 tx_rate_ldpc;
115 u8 tx_rate_stbc;
116 u8 tx_ltf;
117
developer8effbd32023-04-17 15:57:28 +0800118@@ -679,10 +689,37 @@ struct mt76_testmode_data {
developer6caa5e22022-06-16 13:33:13 +0800119 u8 tx_power[4];
120 u8 tx_power_control;
121
122- u8 addr[3][ETH_ALEN];
123+ struct list_head tm_entry_list;
124+ struct mt76_wcid *cur_entry;
125+ u8 entry_num;
126+ union {
127+ struct mt76_testmode_entry_data ed;
128+ struct {
129+ /* must be the same as mt76_testmode_entry_data */
130+ struct sk_buff *tx_skb;
131+
132+ u16 tx_mpdu_len;
133+ u8 tx_rate_idx;
134+ u8 tx_rate_nss;
135+ u8 tx_rate_ldpc;
136+
137+ u8 addr[3][ETH_ALEN];
138+ u8 aid;
139+ u8 ru_alloc;
140+ u8 ru_idx;
141+ };
142+ };
143
144 u8 flag;
145
146+ struct {
147+ u8 type;
148+ u8 enable;
149+ } cfg;
150+
151+ u8 txbf_act;
152+ u16 txbf_param[8];
153+
154 u32 tx_pending;
155 u32 tx_queued;
156 u16 tx_queued_limit;
developer7af0f762023-05-22 15:16:16 +0800157@@ -1147,6 +1184,59 @@ static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +0800158 #endif
159 }
160
161+#ifdef CONFIG_NL80211_TESTMODE
162+static inline struct mt76_wcid *
163+mt76_testmode_first_entry(struct mt76_phy *phy)
164+{
165+ if (list_empty(&phy->test.tm_entry_list) && !phy->test.aid)
166+ return &phy->dev->global_wcid;
167+
168+ return list_first_entry(&phy->test.tm_entry_list,
169+ typeof(struct mt76_wcid),
170+ list);
171+}
172+
173+static inline struct mt76_testmode_entry_data *
174+mt76_testmode_entry_data(struct mt76_phy *phy, struct mt76_wcid *wcid)
175+{
176+ if (!wcid)
177+ return NULL;
178+ if (wcid == &phy->dev->global_wcid)
179+ return &phy->test.ed;
180+
181+ return (struct mt76_testmode_entry_data *)((u8 *)wcid +
182+ phy->hw->sta_data_size);
183+}
184+
185+#define mt76_tm_for_each_entry(phy, wcid, ed) \
186+ for (wcid = mt76_testmode_first_entry(phy), \
187+ ed = mt76_testmode_entry_data(phy, wcid); \
188+ ((phy->test.aid && \
189+ !list_entry_is_head(wcid, &phy->test.tm_entry_list, list)) || \
190+ (!phy->test.aid && wcid == &phy->dev->global_wcid)) && ed; \
191+ wcid = list_next_entry(wcid, list), \
192+ ed = mt76_testmode_entry_data(phy, wcid))
193+#endif
194+
195+static inline bool __mt76_is_testmode_skb(struct mt76_phy *phy,
196+ struct sk_buff *skb)
197+{
198+#ifdef CONFIG_NL80211_TESTMODE
199+ struct mt76_testmode_entry_data *ed = &phy->test.ed;
200+ struct mt76_wcid *wcid;
201+
202+ if (skb == ed->tx_skb)
203+ return true;
204+
205+ mt76_tm_for_each_entry(phy, wcid, ed)
206+ if (skb == ed->tx_skb)
207+ return true;
208+ return false;
209+#else
210+ return false;
211+#endif
212+}
213+
214 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
215 struct sk_buff *skb,
216 struct ieee80211_hw **hw)
developer7af0f762023-05-22 15:16:16 +0800217@@ -1157,7 +1247,8 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
developerf7a3ca32022-09-01 14:44:55 +0800218 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
219 struct mt76_phy *phy = dev->phys[i];
220
221- if (phy && skb == phy->test.tx_skb) {
222+ if (phy && mt76_testmode_enabled(phy) &&
223+ __mt76_is_testmode_skb(phy, skb)) {
224 *hw = dev->phys[i]->hw;
225 return true;
226 }
developer7af0f762023-05-22 15:16:16 +0800227@@ -1259,7 +1350,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +0800228 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
229 struct netlink_callback *cb, void *data, int len);
230 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
231-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
232+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
233+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN]);
234
235 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
236 {
237diff --git a/mt76_connac_mcu.c b/mt76_connac_mcu.c
developer7af0f762023-05-22 15:16:16 +0800238index 1cdef36..15ed126 100644
developer6caa5e22022-06-16 13:33:13 +0800239--- a/mt76_connac_mcu.c
240+++ b/mt76_connac_mcu.c
developer7af0f762023-05-22 15:16:16 +0800241@@ -395,6 +395,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
developer6caa5e22022-06-16 13:33:13 +0800242 switch (vif->type) {
243 case NL80211_IFTYPE_MESH_POINT:
244 case NL80211_IFTYPE_AP:
245+ case NL80211_IFTYPE_MONITOR:
developer7af0f762023-05-22 15:16:16 +0800246 if (vif->p2p && !is_mt7921(dev))
developer6caa5e22022-06-16 13:33:13 +0800247 conn_type = CONNECTION_P2P_GC;
248 else
developer8effbd32023-04-17 15:57:28 +0800249@@ -576,6 +577,9 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
developer6caa5e22022-06-16 13:33:13 +0800250 rx->rca2 = 1;
251 rx->rv = 1;
252
253+ if (vif->type == NL80211_IFTYPE_MONITOR)
254+ rx->rca1 = 0;
255+
256 if (!is_connac_v1(dev))
257 return;
258
259diff --git a/mt76_connac_mcu.h b/mt76_connac_mcu.h
developer7af0f762023-05-22 15:16:16 +0800260index d6165a3..79416e6 100644
developer6caa5e22022-06-16 13:33:13 +0800261--- a/mt76_connac_mcu.h
262+++ b/mt76_connac_mcu.h
developer15c355d2023-03-21 17:28:34 +0800263@@ -996,6 +996,7 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800264 MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
265 MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
266 MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
267+ MCU_EXT_EVENT_BF_STATUS_READ = 0x35,
268 MCU_EXT_EVENT_RDD_REPORT = 0x3a,
269 MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
270 MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
developer15c355d2023-03-21 17:28:34 +0800271@@ -1197,6 +1198,7 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800272 MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
273 /* for vendor csi and air monitor */
274 MCU_EXT_CMD_SMESH_CTRL = 0xae,
275+ MCU_EXT_CMD_RX_STAT_USER_CTRL = 0xb3,
276 MCU_EXT_CMD_CERT_CFG = 0xb7,
277 MCU_EXT_CMD_CSI_CTRL = 0xc2,
278 };
developerc9333e12023-04-06 18:07:42 +0800279diff --git a/mt7915/eeprom.c b/mt7915/eeprom.c
developer7af0f762023-05-22 15:16:16 +0800280index 76be730..f5ab331 100644
developerc9333e12023-04-06 18:07:42 +0800281--- a/mt7915/eeprom.c
282+++ b/mt7915/eeprom.c
developer7af0f762023-05-22 15:16:16 +0800283@@ -131,7 +131,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
developerc9333e12023-04-06 18:07:42 +0800284 /* read eeprom data from efuse */
285 block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
286 for (i = 0; i < block_num; i++) {
287- ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
288+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size, NULL);
289 if (ret < 0)
290 return ret;
291 }
developer6caa5e22022-06-16 13:33:13 +0800292diff --git a/mt7915/init.c b/mt7915/init.c
developer7af0f762023-05-22 15:16:16 +0800293index 7250998..8eacf34 100644
developer6caa5e22022-06-16 13:33:13 +0800294--- a/mt7915/init.c
295+++ b/mt7915/init.c
developer7af0f762023-05-22 15:16:16 +0800296@@ -701,7 +701,7 @@ static void mt7915_init_work(struct work_struct *work)
developer6caa5e22022-06-16 13:33:13 +0800297 struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
298 init_work);
299
300- mt7915_mcu_set_eeprom(dev);
301+ mt7915_mcu_set_eeprom(dev, dev->flash_mode);
302 mt7915_mac_init(dev);
303 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
304 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
305diff --git a/mt7915/mac.c b/mt7915/mac.c
developer7af0f762023-05-22 15:16:16 +0800306index 25acb97..ee33850 100644
developer6caa5e22022-06-16 13:33:13 +0800307--- a/mt7915/mac.c
308+++ b/mt7915/mac.c
developer7af0f762023-05-22 15:16:16 +0800309@@ -603,16 +603,38 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer6caa5e22022-06-16 13:33:13 +0800310 {
311 #ifdef CONFIG_NL80211_TESTMODE
312 struct mt76_testmode_data *td = &phy->mt76->test;
313+ struct mt76_testmode_entry_data *ed;
314+ struct mt76_wcid *wcid;
315 const struct ieee80211_rate *r;
316- u8 bw, mode, nss = td->tx_rate_nss;
317- u8 rate_idx = td->tx_rate_idx;
318+ u8 bw, mode, nss, rate_idx, ldpc;
319 u16 rateval = 0;
320 u32 val;
321 bool cck = false;
322 int band;
323
324- if (skb != phy->mt76->test.tx_skb)
325+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
326+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
327+ phy->test.spe_idx));
328+
329+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU) {
330+ txwi[1] |= cpu_to_le32(BIT(18));
331+ txwi[2] = 0;
332+ txwi[3] &= ~cpu_to_le32(MT_TXD3_NO_ACK);
333+ le32p_replace_bits(&txwi[3], 0x1f, MT_TXD3_REM_TX_COUNT);
334+
developerf7a3ca32022-09-01 14:44:55 +0800335 return;
developer6caa5e22022-06-16 13:33:13 +0800336+ }
337+
338+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
339+ if (ed->tx_skb == skb)
340+ break;
341+
342+ if (!ed)
developerf7a3ca32022-09-01 14:44:55 +0800343+ return;
344+
developer6caa5e22022-06-16 13:33:13 +0800345+ nss = ed->tx_rate_nss;
346+ rate_idx = ed->tx_rate_idx;
347+ ldpc = ed->tx_rate_ldpc;
developerf7a3ca32022-09-01 14:44:55 +0800348
developer6caa5e22022-06-16 13:33:13 +0800349 switch (td->tx_rate_mode) {
350 case MT76_TM_TX_MODE_HT:
developer7af0f762023-05-22 15:16:16 +0800351@@ -643,7 +665,7 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer3f784572023-01-31 15:21:28 +0800352 rate_idx += 4;
353
354 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
355- val = cck ? r->hw_value_short : r->hw_value;
356+ val = r->hw_value;
357
358 mode = val >> 8;
359 rate_idx = val & 0xff;
developer7af0f762023-05-22 15:16:16 +0800360@@ -702,13 +724,14 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
developer6caa5e22022-06-16 13:33:13 +0800361 if (mode >= MT_PHY_TYPE_HE_SU)
362 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
363
364- if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
365+ if (ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
366 val |= MT_TXD6_LDPC;
367
developerf7a3ca32022-09-01 14:44:55 +0800368 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
developer6caa5e22022-06-16 13:33:13 +0800369+ if (phy->test.bf_en)
370+ val |= MT_TXD6_TX_IBF | MT_TXD6_TX_EBF;
371+
372 txwi[6] |= cpu_to_le32(val);
373- txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
374- phy->test.spe_idx));
375 #endif
376 }
377
developer7af0f762023-05-22 15:16:16 +0800378@@ -1469,7 +1492,7 @@ mt7915_mac_restart(struct mt7915_dev *dev)
developer047bc182022-11-16 12:20:48 +0800379 goto out;
380
381 /* set the necessary init items */
382- ret = mt7915_mcu_set_eeprom(dev);
383+ ret = mt7915_mcu_set_eeprom(dev, dev->flash_mode);
384 if (ret)
385 goto out;
386
developer6caa5e22022-06-16 13:33:13 +0800387diff --git a/mt7915/main.c b/mt7915/main.c
developer7af0f762023-05-22 15:16:16 +0800388index e7523c1..798eaa8 100644
developer6caa5e22022-06-16 13:33:13 +0800389--- a/mt7915/main.c
390+++ b/mt7915/main.c
developerc5ce7502022-12-19 11:33:22 +0800391@@ -238,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
developer6caa5e22022-06-16 13:33:13 +0800392 mvif->phy = phy;
developer17bb0a82022-12-13 15:52:04 +0800393 mvif->mt76.band_idx = phy->mt76->band_idx;
developer6caa5e22022-06-16 13:33:13 +0800394
395- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
396+ mvif->mt76.wmm_idx = (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MONITOR);
397 if (ext_phy)
398 mvif->mt76.wmm_idx += 2;
399
400diff --git a/mt7915/mcu.c b/mt7915/mcu.c
developer692ed9b2023-06-19 12:03:50 +0800401index 5a68bb7..5cea513 100644
developer6caa5e22022-06-16 13:33:13 +0800402--- a/mt7915/mcu.c
403+++ b/mt7915/mcu.c
developer7af0f762023-05-22 15:16:16 +0800404@@ -387,6 +387,11 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer6caa5e22022-06-16 13:33:13 +0800405 case MCU_EXT_EVENT_BCC_NOTIFY:
406 mt7915_mcu_rx_bcc_notify(dev, skb);
407 break;
408+#ifdef CONFIG_NL80211_TESTMODE
409+ case MCU_EXT_EVENT_BF_STATUS_READ:
410+ mt7915_tm_txbf_status_read(dev, skb);
411+ break;
412+#endif
413 default:
414 break;
415 }
developer7af0f762023-05-22 15:16:16 +0800416@@ -418,6 +423,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
developer6caa5e22022-06-16 13:33:13 +0800417 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
418 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
419 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
420+ rxd->ext_eid == MCU_EXT_EVENT_BF_STATUS_READ ||
421 !rxd->seq)
422 mt7915_mcu_rx_unsolicited_event(dev, skb);
423 else
developer692ed9b2023-06-19 12:03:50 +0800424@@ -2713,7 +2719,8 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
425 }
426 #endif
427
428- if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
429+ if (mt76_connac_spe_idx(phy->mt76->antenna_mask) &&
430+ !mt76_testmode_enabled(phy->mt76))
431 req.tx_path_num = fls(phy->mt76->antenna_mask);
432
433 if (dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
434@@ -2781,21 +2788,21 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
developer6caa5e22022-06-16 13:33:13 +0800435 return 0;
436 }
437
438-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
439+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode)
440 {
441 struct mt7915_mcu_eeprom req = {
442 .buffer_mode = EE_MODE_EFUSE,
443 .format = EE_FORMAT_WHOLE,
444 };
445
446- if (dev->flash_mode)
447+ if (flash_mode)
448 return mt7915_mcu_set_eeprom_flash(dev);
449
450 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
developerc9333e12023-04-06 18:07:42 +0800451 &req, sizeof(req), true);
452 }
453
454-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
455+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf)
456 {
457 struct mt7915_mcu_eeprom_info req = {
458 .addr = cpu_to_le32(round_down(offset,
developer692ed9b2023-06-19 12:03:50 +0800459@@ -2804,7 +2811,7 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
developerc9333e12023-04-06 18:07:42 +0800460 struct mt7915_mcu_eeprom_info *res;
461 struct sk_buff *skb;
462 int ret;
463- u8 *buf;
464+ u8 *buf = read_buf;
465
466 ret = mt76_mcu_send_and_get_msg(&dev->mt76,
467 MCU_EXT_QUERY(EFUSE_ACCESS),
developer692ed9b2023-06-19 12:03:50 +0800468@@ -2813,8 +2820,11 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
developerc9333e12023-04-06 18:07:42 +0800469 return ret;
470
471 res = (struct mt7915_mcu_eeprom_info *)skb->data;
472- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
473+
474+ if (!buf)
475+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
476 memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
477+
478 dev_kfree_skb(skb);
479
480 return 0;
developer6caa5e22022-06-16 13:33:13 +0800481diff --git a/mt7915/mcu.h b/mt7915/mcu.h
developer1475cf22023-05-05 13:45:43 +0800482index 1be6cf3..0020025 100644
developer6caa5e22022-06-16 13:33:13 +0800483--- a/mt7915/mcu.h
484+++ b/mt7915/mcu.h
developer7c3a5082022-06-24 13:40:42 +0800485@@ -8,10 +8,15 @@
developer6caa5e22022-06-16 13:33:13 +0800486
487 enum {
488 MCU_ATE_SET_TRX = 0x1,
489+ MCU_ATE_SET_TSSI = 0x5,
490+ MCU_ATE_SET_DPD = 0x6,
491+ MCU_ATE_SET_RATE_POWER_OFFSET = 0x7,
492+ MCU_ATE_SET_THERMAL_COMP = 0x8,
493 MCU_ATE_SET_FREQ_OFFSET = 0xa,
494 MCU_ATE_SET_PHY_COUNT = 0x11,
495 MCU_ATE_SET_SLOT_TIME = 0x13,
496 MCU_ATE_CLEAN_TXQUEUE = 0x1c,
497+ MCU_ATE_SET_MU_RX_AID = 0x1e,
498 };
499
developer7c3a5082022-06-24 13:40:42 +0800500 struct mt7915_mcu_thermal_ctrl {
developer1475cf22023-05-05 13:45:43 +0800501@@ -527,6 +532,12 @@ enum {
developer6caa5e22022-06-16 13:33:13 +0800502
503 enum {
504 MT_BF_SOUNDING_ON = 1,
505+ MT_BF_DATA_PACKET_APPLY = 2,
506+ MT_BF_PFMU_TAG_READ = 5,
507+ MT_BF_PFMU_TAG_WRITE = 6,
508+ MT_BF_PHASE_CAL = 14,
509+ MT_BF_IBF_PHASE_COMP = 15,
510+ MT_BF_PROFILE_WRITE_ALL = 17,
511 MT_BF_TYPE_UPDATE = 20,
512 MT_BF_MODULE_UPDATE = 25
513 };
developer1475cf22023-05-05 13:45:43 +0800514@@ -775,10 +786,20 @@ struct mt7915_muru {
developer6caa5e22022-06-16 13:33:13 +0800515 #define MURU_OFDMA_SCH_TYPE_UL BIT(1)
516
developer7c3a5082022-06-24 13:40:42 +0800517 /* Common Config */
developer6caa5e22022-06-16 13:33:13 +0800518-#define MURU_COMM_PPDU_FMT BIT(0)
519-#define MURU_COMM_SCH_TYPE BIT(1)
520-#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
developer6caa5e22022-06-16 13:33:13 +0800521-/* DL&UL User config*/
developer6caa5e22022-06-16 13:33:13 +0800522+/* #define MURU_COMM_PPDU_FMT BIT(0) */
523+/* #define MURU_COMM_SCH_TYPE BIT(1) */
524+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE) */
developer7c3a5082022-06-24 13:40:42 +0800525+#define MURU_COMM_PPDU_FMT BIT(0)
526+#define MURU_COMM_SCH_TYPE BIT(1)
527+#define MURU_COMM_BAND BIT(2)
528+#define MURU_COMM_WMM BIT(3)
529+#define MURU_COMM_SPE_IDX BIT(4)
530+#define MURU_COMM_PROC_TYPE BIT(5)
developer1475cf22023-05-05 13:45:43 +0800531+#define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_SCH_TYPE)
532+#define MURU_COMM_SET_TM (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \
533+ MURU_COMM_WMM | MURU_COMM_SPE_IDX)
developer6caa5e22022-06-16 13:33:13 +0800534+
535+/* DL&UL User config */
536 #define MURU_USER_CNT BIT(4)
537
538 enum {
539diff --git a/mt7915/mmio.c b/mt7915/mmio.c
developer7af0f762023-05-22 15:16:16 +0800540index b97aca7..fd5722b 100644
developer6caa5e22022-06-16 13:33:13 +0800541--- a/mt7915/mmio.c
542+++ b/mt7915/mmio.c
developer60a3d662023-02-07 15:24:34 +0800543@@ -134,6 +134,7 @@ static const u32 mt7915_offs[] = {
developer6caa5e22022-06-16 13:33:13 +0800544 [ARB_DRNGR0] = 0x194,
545 [ARB_SCR] = 0x080,
546 [RMAC_MIB_AIRTIME14] = 0x3b8,
547+ [AGG_AALCR0] = 0x048,
548 [AGG_AWSCR0] = 0x05c,
549 [AGG_PCR0] = 0x06c,
550 [AGG_ACR0] = 0x084,
developer60a3d662023-02-07 15:24:34 +0800551@@ -209,6 +210,7 @@ static const u32 mt7916_offs[] = {
developer6caa5e22022-06-16 13:33:13 +0800552 [ARB_DRNGR0] = 0x1e0,
553 [ARB_SCR] = 0x000,
554 [RMAC_MIB_AIRTIME14] = 0x0398,
555+ [AGG_AALCR0] = 0x028,
556 [AGG_AWSCR0] = 0x030,
557 [AGG_PCR0] = 0x040,
558 [AGG_ACR0] = 0x054,
559diff --git a/mt7915/mt7915.h b/mt7915/mt7915.h
developer7af0f762023-05-22 15:16:16 +0800560index 60364f0..83743cd 100644
developer6caa5e22022-06-16 13:33:13 +0800561--- a/mt7915/mt7915.h
562+++ b/mt7915/mt7915.h
developer7af0f762023-05-22 15:16:16 +0800563@@ -321,6 +321,9 @@ struct mt7915_phy {
developer6caa5e22022-06-16 13:33:13 +0800564 u8 last_snr;
565
566 u8 spe_idx;
567+
568+ bool bf_en;
569+ bool bf_ever_en;
570 } test;
571 #endif
572
developer7af0f762023-05-22 15:16:16 +0800573@@ -419,6 +422,14 @@ struct mt7915_dev {
developer6caa5e22022-06-16 13:33:13 +0800574 void __iomem *dcm;
575 void __iomem *sku;
576
577+#ifdef CONFIG_NL80211_TESTMODE
578+ struct {
579+ void *txbf_phase_cal;
580+ void *txbf_pfmu_data;
581+ void *txbf_pfmu_tag;
582+ } test;
583+#endif
584+
585 #ifdef MTK_DEBUG
586 u16 wlan_idx;
587 struct {
developer7af0f762023-05-22 15:16:16 +0800588@@ -591,8 +602,8 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
developer6caa5e22022-06-16 13:33:13 +0800589 struct ieee80211_vif *vif,
590 struct ieee80211_sta *sta,
591 void *data, u32 field);
592-int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
developerc9333e12023-04-06 18:07:42 +0800593-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
developer6caa5e22022-06-16 13:33:13 +0800594+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev, bool flash_mode);
developerc9333e12023-04-06 18:07:42 +0800595+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf);
developer6caa5e22022-06-16 13:33:13 +0800596 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
597 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
developerc9333e12023-04-06 18:07:42 +0800598 bool hdr_trans);
developer7af0f762023-05-22 15:16:16 +0800599@@ -630,6 +641,7 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
developer6caa5e22022-06-16 13:33:13 +0800600 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
601 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
602 void mt7915_mcu_exit(struct mt7915_dev *dev);
603+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb);
604
605 static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
606 {
607diff --git a/mt7915/regs.h b/mt7915/regs.h
developer692ed9b2023-06-19 12:03:50 +0800608index e7bc181..b6f36f5 100644
developer6caa5e22022-06-16 13:33:13 +0800609--- a/mt7915/regs.h
610+++ b/mt7915/regs.h
developer3609d782022-11-29 18:07:22 +0800611@@ -62,6 +62,7 @@ enum offs_rev {
developer6caa5e22022-06-16 13:33:13 +0800612 ARB_DRNGR0,
613 ARB_SCR,
614 RMAC_MIB_AIRTIME14,
615+ AGG_AALCR0,
616 AGG_AWSCR0,
617 AGG_PCR0,
618 AGG_ACR0,
developer3609d782022-11-29 18:07:22 +0800619@@ -482,6 +483,8 @@ enum offs_rev {
developer6caa5e22022-06-16 13:33:13 +0800620 #define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
621 #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
622
623+#define MT_AGG_AALCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AALCR0) + \
developer692ed9b2023-06-19 12:03:50 +0800624+ (_n) * 4))
developer6caa5e22022-06-16 13:33:13 +0800625 #define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
626 (_n) * 4))
627 #define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
628diff --git a/mt7915/testmode.c b/mt7915/testmode.c
developer692ed9b2023-06-19 12:03:50 +0800629index 4693919..62ef4db 100644
developer6caa5e22022-06-16 13:33:13 +0800630--- a/mt7915/testmode.c
631+++ b/mt7915/testmode.c
632@@ -9,6 +9,9 @@
633 enum {
634 TM_CHANGED_TXPOWER,
635 TM_CHANGED_FREQ_OFFSET,
636+ TM_CHANGED_AID,
637+ TM_CHANGED_CFG,
638+ TM_CHANGED_TXBF_ACT,
639
640 /* must be last */
641 NUM_TM_CHANGED
642@@ -17,6 +20,9 @@ enum {
643 static const u8 tm_change_map[] = {
644 [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
645 [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
646+ [TM_CHANGED_AID] = MT76_TM_ATTR_AID,
647+ [TM_CHANGED_CFG] = MT76_TM_ATTR_CFG,
648+ [TM_CHANGED_TXBF_ACT] = MT76_TM_ATTR_TXBF_ACT,
649 };
650
651 struct reg_band {
developer692ed9b2023-06-19 12:03:50 +0800652@@ -33,6 +39,57 @@ struct reg_band {
developer6caa5e22022-06-16 13:33:13 +0800653 #define TM_REG_MAX_ID 20
654 static struct reg_band reg_backup_list[TM_REG_MAX_ID];
655
656+static void mt7915_tm_update_entry(struct mt7915_phy *phy);
657+
658+static u8 mt7915_tm_chan_bw(enum nl80211_chan_width width)
659+{
660+ static const u8 width_to_bw[] = {
661+ [NL80211_CHAN_WIDTH_40] = TM_CBW_40MHZ,
662+ [NL80211_CHAN_WIDTH_80] = TM_CBW_80MHZ,
663+ [NL80211_CHAN_WIDTH_80P80] = TM_CBW_8080MHZ,
664+ [NL80211_CHAN_WIDTH_160] = TM_CBW_160MHZ,
665+ [NL80211_CHAN_WIDTH_5] = TM_CBW_5MHZ,
666+ [NL80211_CHAN_WIDTH_10] = TM_CBW_10MHZ,
667+ [NL80211_CHAN_WIDTH_20] = TM_CBW_20MHZ,
668+ [NL80211_CHAN_WIDTH_20_NOHT] = TM_CBW_20MHZ,
669+ };
670+
671+ if (width >= ARRAY_SIZE(width_to_bw))
672+ return 0;
673+
674+ return width_to_bw[width];
675+}
676+
developer692ed9b2023-06-19 12:03:50 +0800677+static int
678+mt7915_tm_check_antenna(struct mt7915_phy *phy)
679+{
680+ struct mt76_testmode_data *td = &phy->mt76->test;
681+ struct mt7915_dev *dev = phy->dev;
682+ u8 band_idx = phy->mt76->band_idx;
683+ u32 chainmask = phy->mt76->chainmask;
684+
685+ chainmask = chainmask >> (dev->chainshift * band_idx);
686+ if (td->tx_antenna_mask & ~chainmask) {
687+ dev_err(dev->mt76.dev,
688+ "tx antenna mask %d exceeds hardware limitation (chainmask %d)\n",
689+ td->tx_antenna_mask, chainmask);
690+ return -EINVAL;
691+ }
692+
693+ return 0;
694+}
695+
developer6caa5e22022-06-16 13:33:13 +0800696+static void
697+mt7915_tm_update_channel(struct mt7915_phy *phy)
698+{
699+ mutex_unlock(&phy->dev->mt76.mutex);
700+ mt7915_set_channel(phy);
701+ mutex_lock(&phy->dev->mt76.mutex);
702+
703+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
704+
705+ mt7915_tm_update_entry(phy);
706+}
707
708 static int
709 mt7915_tm_set_tx_power(struct mt7915_phy *phy)
developer692ed9b2023-06-19 12:03:50 +0800710@@ -119,18 +176,28 @@ mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
developer6caa5e22022-06-16 13:33:13 +0800711 }
712
713 static int
714-mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
715+mt7915_tm_clean_hwq(struct mt7915_phy *phy)
716 {
717+ struct mt76_testmode_entry_data *ed;
718+ struct mt76_wcid *wcid;
719 struct mt7915_dev *dev = phy->dev;
720 struct mt7915_tm_cmd req = {
721 .testmode_en = 1,
722 .param_idx = MCU_ATE_CLEAN_TXQUEUE,
723- .param.clean.wcid = wcid,
developer17bb0a82022-12-13 15:52:04 +0800724 .param.clean.band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +0800725 };
726
727- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
728- sizeof(req), false);
729+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
730+ int ret;
731+
732+ req.param.clean.wcid = wcid->idx;
733+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
734+ &req, sizeof(req), false);
735+ if (ret)
736+ return ret;
737+ }
738+
739+ return 0;
740 }
741
742 static int
developer692ed9b2023-06-19 12:03:50 +0800743@@ -141,7 +208,7 @@ mt7915_tm_set_phy_count(struct mt7915_phy *phy, u8 control)
developer17bb0a82022-12-13 15:52:04 +0800744 .testmode_en = 1,
745 .param_idx = MCU_ATE_SET_PHY_COUNT,
746 .param.cfg.enable = control,
747- .param.cfg.band = phy != &dev->phy,
748+ .param.cfg.band = phy->mt76->band_idx,
749 };
750
751 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
developer692ed9b2023-06-19 12:03:50 +0800752@@ -182,12 +249,738 @@ mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
developer6caa5e22022-06-16 13:33:13 +0800753 return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
754 }
755
756+static int
757+mt7915_tm_set_cfg(struct mt7915_phy *phy)
758+{
759+ static const u8 cfg_cmd[] = {
760+ [MT76_TM_CFG_TSSI] = MCU_ATE_SET_TSSI,
761+ [MT76_TM_CFG_DPD] = MCU_ATE_SET_DPD,
762+ [MT76_TM_CFG_RATE_POWER_OFFSET] = MCU_ATE_SET_RATE_POWER_OFFSET,
763+ [MT76_TM_CFG_THERMAL_COMP] = MCU_ATE_SET_THERMAL_COMP,
764+ };
765+ struct mt76_testmode_data *td = &phy->mt76->test;
766+ struct mt7915_dev *dev = phy->dev;
767+ struct mt7915_tm_cmd req = {
768+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
769+ .param_idx = cfg_cmd[td->cfg.type],
770+ .param.cfg.enable = td->cfg.enable,
developer17bb0a82022-12-13 15:52:04 +0800771+ .param.cfg.band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +0800772+ };
773+
774+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
775+ sizeof(req), false);
776+}
777+
778+static int
779+mt7915_tm_add_txbf(struct mt7915_phy *phy, struct ieee80211_vif *vif,
780+ struct ieee80211_sta *sta, u8 pfmu_idx, u8 nr,
781+ u8 nc, bool ebf)
782+{
783+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
784+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
785+ struct mt7915_dev *dev = phy->dev;
786+ struct sk_buff *skb;
787+ struct sta_rec_bf *bf;
788+ struct tlv *tlv;
789+ u8 ndp_rate;
790+
791+ if (nr == 1)
792+ ndp_rate = 8;
793+ else if (nr == 2)
794+ ndp_rate = 16;
795+ else
796+ ndp_rate = 24;
797+
798+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
799+ &msta->wcid);
800+ if (IS_ERR(skb))
801+ return PTR_ERR(skb);
802+
803+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
804+ bf = (struct sta_rec_bf *)tlv;
805+
806+ bf->pfmu = cpu_to_le16(pfmu_idx);
807+ bf->sounding_phy = 1;
808+ bf->bf_cap = ebf;
809+ bf->ncol = nc;
810+ bf->nrow = nr;
811+ bf->ndp_rate = ndp_rate;
812+ bf->ibf_timeout = 0xff;
813+ bf->tx_mode = MT_PHY_TYPE_HT;
814+
815+ if (ebf) {
816+ bf->mem[0].row = 0;
817+ bf->mem[1].row = 1;
818+ bf->mem[2].row = 2;
819+ bf->mem[3].row = 3;
820+ } else {
821+ bf->mem[0].row = 4;
822+ bf->mem[1].row = 5;
823+ bf->mem[2].row = 6;
824+ bf->mem[3].row = 7;
825+ }
826+
827+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
828+ MCU_EXT_CMD(STA_REC_UPDATE), true);
829+}
830+
831+static int
832+mt7915_tm_entry_add(struct mt7915_phy *phy, u8 aid)
833+{
834+ struct mt76_testmode_data *td = &phy->mt76->test;
835+ struct mt76_testmode_entry_data *ed;
836+ struct ieee80211_sband_iftype_data *sdata;
837+ struct ieee80211_supported_band *sband;
838+ struct ieee80211_sta *sta;
839+ struct mt7915_sta *msta;
840+ int tid, ret;
841+
842+ if (td->entry_num >= MT76_TM_MAX_ENTRY_NUM)
843+ return -EINVAL;
844+
845+ sta = kzalloc(sizeof(*sta) + phy->mt76->hw->sta_data_size +
846+ sizeof(*ed), GFP_KERNEL);
847+ if (!sta)
848+ return -ENOMEM;
849+
850+ msta = (struct mt7915_sta *)sta->drv_priv;
851+ ed = mt76_testmode_entry_data(phy->mt76, &msta->wcid);
852+ memcpy(ed, &td->ed, sizeof(*ed));
853+
854+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ) {
855+ sband = &phy->mt76->sband_5g.sband;
856+ sdata = phy->iftype[NL80211_BAND_5GHZ];
857+ } else if (phy->mt76->chandef.chan->band == NL80211_BAND_6GHZ) {
858+ sband = &phy->mt76->sband_6g.sband;
859+ sdata = phy->iftype[NL80211_BAND_6GHZ];
860+ } else {
861+ sband = &phy->mt76->sband_2g.sband;
862+ sdata = phy->iftype[NL80211_BAND_2GHZ];
863+ }
864+
865+ memcpy(sta->addr, ed->addr[0], ETH_ALEN);
866+ if (phy->test.bf_en) {
867+ u8 addr[ETH_ALEN] = {0x00, 0x11, 0x11, 0x11, 0x11, 0x11};
868+
869+ memcpy(sta->addr, addr, ETH_ALEN);
870+ }
871+
872+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT)
developer17bb0a82022-12-13 15:52:04 +0800873+ memcpy(&sta->deflink.ht_cap, &sband->ht_cap, sizeof(sta->deflink.ht_cap));
developer6caa5e22022-06-16 13:33:13 +0800874+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_VHT)
developer17bb0a82022-12-13 15:52:04 +0800875+ memcpy(&sta->deflink.vht_cap, &sband->vht_cap, sizeof(sta->deflink.vht_cap));
developer6caa5e22022-06-16 13:33:13 +0800876+ if (td->tx_rate_mode >= MT76_TM_TX_MODE_HE_SU)
developer17bb0a82022-12-13 15:52:04 +0800877+ memcpy(&sta->deflink.he_cap, &sdata[NL80211_IFTYPE_STATION].he_cap,
878+ sizeof(sta->deflink.he_cap));
developer6caa5e22022-06-16 13:33:13 +0800879+ sta->aid = aid;
880+ sta->wme = 1;
881+
882+ ret = mt7915_mac_sta_add(&phy->dev->mt76, phy->monitor_vif, sta);
883+ if (ret) {
884+ kfree(sta);
885+ return ret;
886+ }
887+
888+ /* prevent from starting tx ba session */
889+ for (tid = 0; tid < 8; tid++)
890+ set_bit(tid, &msta->ampdu_state);
891+
892+ list_add_tail(&msta->wcid.list, &td->tm_entry_list);
893+ td->entry_num++;
894+
895+ return 0;
896+}
897+
898+static void
899+mt7915_tm_entry_remove(struct mt7915_phy *phy, u8 aid)
900+{
901+ struct mt76_testmode_data *td = &phy->mt76->test;
902+ struct mt76_wcid *wcid, *tmp;
903+
904+ if (list_empty(&td->tm_entry_list))
905+ return;
906+
907+ list_for_each_entry_safe(wcid, tmp, &td->tm_entry_list, list) {
908+ struct mt76_testmode_entry_data *ed;
909+ struct mt7915_dev *dev = phy->dev;
910+ struct ieee80211_sta *sta;
911+
912+ ed = mt76_testmode_entry_data(phy->mt76, wcid);
913+ if (aid && ed->aid != aid)
914+ continue;
915+
916+ sta = wcid_to_sta(wcid);
917+ mt7915_mac_sta_remove(&dev->mt76, phy->monitor_vif, sta);
918+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, wcid->idx);
919+
920+ list_del_init(&wcid->list);
921+ kfree(sta);
922+ phy->mt76->test.entry_num--;
923+ }
924+}
925+
926+static int
927+mt7915_tm_set_entry(struct mt7915_phy *phy)
928+{
929+ struct mt76_testmode_data *td = &phy->mt76->test;
930+ struct mt76_testmode_entry_data *ed;
931+ struct mt76_wcid *wcid;
932+
933+ if (!td->aid) {
934+ if (td->state > MT76_TM_STATE_IDLE)
935+ mt76_testmode_set_state(phy->mt76, MT76_TM_STATE_IDLE);
936+ mt7915_tm_entry_remove(phy, td->aid);
937+ return 0;
938+ }
939+
940+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
941+ if (ed->aid == td->aid) {
942+ struct sk_buff *skb;
943+
944+ local_bh_disable();
945+ skb = ed->tx_skb;
946+ memcpy(ed, &td->ed, sizeof(*ed));
947+ ed->tx_skb = skb;
948+ local_bh_enable();
949+
950+ return 0;
951+ }
952+ }
953+
954+ return mt7915_tm_entry_add(phy, td->aid);
955+}
956+
957+static void
958+mt7915_tm_update_entry(struct mt7915_phy *phy)
959+{
960+ struct mt76_testmode_data *td = &phy->mt76->test;
961+ struct mt76_testmode_entry_data *ed, tmp;
962+ struct mt76_wcid *wcid, *last;
963+
964+ if (!td->aid || phy->test.bf_en)
965+ return;
966+
967+ memcpy(&tmp, &td->ed, sizeof(tmp));
968+ last = list_last_entry(&td->tm_entry_list,
969+ struct mt76_wcid, list);
970+
971+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
972+ memcpy(&td->ed, ed, sizeof(td->ed));
973+ mt7915_tm_entry_remove(phy, td->aid);
974+ mt7915_tm_entry_add(phy, td->aid);
975+ if (wcid == last)
976+ break;
977+ }
978+
979+ memcpy(&td->ed, &tmp, sizeof(td->ed));
980+}
981+
982+static int
983+mt7915_tm_txbf_init(struct mt7915_phy *phy, u16 *val)
984+{
985+ struct mt76_testmode_data *td = &phy->mt76->test;
986+ struct mt7915_dev *dev = phy->dev;
987+ bool enable = val[0];
988+ void *phase_cal, *pfmu_data, *pfmu_tag;
989+ u8 addr[ETH_ALEN] = {0x00, 0x22, 0x22, 0x22, 0x22, 0x22};
990+
991+ if (!enable) {
992+ phy->test.bf_en = 0;
993+ return 0;
994+ }
995+
996+ if (!dev->test.txbf_phase_cal) {
997+ phase_cal = devm_kzalloc(dev->mt76.dev,
998+ sizeof(struct mt7915_tm_txbf_phase) *
999+ MAX_PHASE_GROUP_NUM,
1000+ GFP_KERNEL);
1001+ if (!phase_cal)
1002+ return -ENOMEM;
1003+
1004+ dev->test.txbf_phase_cal = phase_cal;
1005+ }
1006+
1007+ if (!dev->test.txbf_pfmu_data) {
1008+ pfmu_data = devm_kzalloc(dev->mt76.dev, 512, GFP_KERNEL);
1009+ if (!pfmu_data)
1010+ return -ENOMEM;
1011+
1012+ dev->test.txbf_pfmu_data = pfmu_data;
1013+ }
1014+
1015+ if (!dev->test.txbf_pfmu_tag) {
1016+ pfmu_tag = devm_kzalloc(dev->mt76.dev,
1017+ sizeof(struct mt7915_tm_pfmu_tag), GFP_KERNEL);
1018+ if (!pfmu_tag)
1019+ return -ENOMEM;
1020+
1021+ dev->test.txbf_pfmu_tag = pfmu_tag;
1022+ }
1023+
1024+ memcpy(phy->monitor_vif->addr, addr, ETH_ALEN);
1025+ mt7915_mcu_add_dev_info(phy, phy->monitor_vif, true);
1026+
1027+ td->tx_rate_mode = MT76_TM_TX_MODE_HT;
1028+ td->tx_mpdu_len = 1024;
1029+ td->tx_rate_sgi = 0;
1030+ td->tx_ipg = 100;
1031+ phy->test.bf_en = 1;
1032+
1033+ return mt7915_tm_set_trx(phy, TM_MAC_TX, true);
1034+}
1035+
1036+static int
1037+mt7915_tm_txbf_phase_comp(struct mt7915_phy *phy, u16 *val)
1038+{
1039+ struct mt7915_dev *dev = phy->dev;
1040+ struct {
1041+ u8 category;
1042+ u8 wlan_idx_lo;
1043+ u8 bw;
1044+ u8 jp_band;
1045+ u8 dbdc_idx;
1046+ bool read_from_e2p;
1047+ bool disable;
1048+ u8 wlan_idx_hi;
1049+ u8 buf[40];
1050+ } __packed req = {
1051+ .category = MT_BF_IBF_PHASE_COMP,
1052+ .bw = val[0],
1053+ .jp_band = (val[2] == 1) ? 1 : 0,
developer17bb0a82022-12-13 15:52:04 +08001054+ .dbdc_idx = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +08001055+ .read_from_e2p = val[3],
1056+ .disable = val[4],
1057+ };
1058+ struct mt7915_tm_txbf_phase *phase =
1059+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1060+
1061+ wait_event_timeout(dev->mt76.tx_wait, phase[val[2]].status != 0, HZ);
1062+ memcpy(req.buf, &phase[val[2]].phase, sizeof(req.buf));
1063+
1064+ pr_info("ibf cal process: phase comp info\n");
1065+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
1066+ &req, sizeof(req), 0);
1067+
1068+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1069+ sizeof(req), true);
1070+}
1071+
1072+static int
1073+mt7915_tm_txbf_profile_tag_read(struct mt7915_phy *phy, u8 pfmu_idx)
1074+{
1075+ struct mt7915_dev *dev = phy->dev;
1076+ struct {
1077+ u8 format_id;
1078+ u8 pfmu_idx;
1079+ bool bfer;
1080+ u8 dbdc_idx;
1081+ } __packed req = {
1082+ .format_id = MT_BF_PFMU_TAG_READ,
1083+ .pfmu_idx = pfmu_idx,
1084+ .bfer = 1,
1085+ .dbdc_idx = phy != &dev->phy,
1086+ };
1087+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1088+
1089+ tag->t1.pfmu_idx = 0;
1090+
1091+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1092+ sizeof(req), true);
1093+}
1094+
1095+static int
1096+mt7915_tm_txbf_profile_tag_write(struct mt7915_phy *phy, u8 pfmu_idx,
1097+ struct mt7915_tm_pfmu_tag *tag)
1098+{
1099+ struct mt7915_dev *dev = phy->dev;
1100+ struct {
1101+ u8 format_id;
1102+ u8 pfmu_idx;
1103+ bool bfer;
1104+ u8 dbdc_idx;
1105+ u8 buf[64];
1106+ } __packed req = {
1107+ .format_id = MT_BF_PFMU_TAG_WRITE,
1108+ .pfmu_idx = pfmu_idx,
1109+ .bfer = 1,
1110+ .dbdc_idx = phy != &dev->phy,
1111+ };
1112+
1113+ memcpy(req.buf, tag, sizeof(*tag));
1114+ wait_event_timeout(dev->mt76.tx_wait, tag->t1.pfmu_idx != 0, HZ);
1115+
1116+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1117+ sizeof(req), false);
1118+}
1119+
1120+static int
1121+mt7915_tm_txbf_apply_tx(struct mt7915_phy *phy, u16 wlan_idx, bool ebf,
1122+ bool ibf, bool phase_cal)
1123+{
1124+#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
1125+#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
1126+ struct mt7915_dev *dev = phy->dev;
1127+ struct {
1128+ u8 category;
1129+ u8 wlan_idx_lo;
1130+ bool ebf;
1131+ bool ibf;
1132+ bool mu_txbf;
1133+ bool phase_cal;
1134+ u8 wlan_idx_hi;
1135+ u8 _rsv;
1136+ } __packed req = {
1137+ .category = MT_BF_DATA_PACKET_APPLY,
1138+ .wlan_idx_lo = to_wcid_lo(wlan_idx),
1139+ .ebf = ebf,
1140+ .ibf = ibf,
1141+ .phase_cal = phase_cal,
1142+ .wlan_idx_hi = to_wcid_hi(wlan_idx),
1143+ };
1144+
1145+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1146+ sizeof(req), false);
1147+}
1148+
1149+static int mt7915_tm_txbf_set_rate(struct mt7915_phy *phy,
1150+ struct mt76_wcid *wcid)
1151+{
1152+ struct mt7915_dev *dev = phy->dev;
1153+ struct mt76_testmode_entry_data *ed = mt76_testmode_entry_data(phy->mt76, wcid);
1154+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
1155+ struct sta_phy rate = {};
1156+
1157+ if (!sta)
1158+ return 0;
1159+
1160+ rate.type = MT_PHY_TYPE_HT;
1161+ rate.bw = mt7915_tm_chan_bw(phy->mt76->chandef.width);
1162+ rate.nss = ed->tx_rate_nss;
1163+ rate.mcs = ed->tx_rate_idx;
1164+ rate.ldpc = (rate.bw || ed->tx_rate_ldpc) * GENMASK(2, 0);
1165+
1166+ return mt7915_mcu_set_fixed_rate_ctrl(dev, phy->monitor_vif, sta,
1167+ &rate, RATE_PARAM_FIXED);
1168+}
1169+
1170+static int
1171+mt7915_tm_txbf_set_tx(struct mt7915_phy *phy, u16 *val)
1172+{
1173+ bool bf_on = val[0], update = val[3];
1174+ /* u16 wlan_idx = val[2]; */
1175+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1176+ struct mt76_testmode_data *td = &phy->mt76->test;
1177+ struct mt76_wcid *wcid;
1178+
1179+ if (bf_on) {
1180+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1181+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1182+ tag->t1.invalid_prof = false;
1183+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1184+
1185+ phy->test.bf_ever_en = true;
1186+
1187+ if (update)
1188+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 1, 1);
1189+ } else {
1190+ if (!phy->test.bf_ever_en) {
1191+ if (update)
1192+ mt7915_tm_txbf_apply_tx(phy, 1, 0, 0, 0);
1193+ } else {
1194+ phy->test.bf_ever_en = false;
1195+
1196+ mt7915_tm_txbf_profile_tag_read(phy, 2);
1197+ tag->t1.invalid_prof = true;
1198+ mt7915_tm_txbf_profile_tag_write(phy, 2, tag);
1199+ }
1200+ }
1201+
1202+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1203+ mt7915_tm_txbf_set_rate(phy, wcid);
1204+
1205+ return 0;
1206+}
1207+
1208+static int
1209+mt7915_tm_txbf_profile_update(struct mt7915_phy *phy, u16 *val, bool ebf)
1210+{
1211+ static const u8 mode_to_lm[] = {
1212+ [MT76_TM_TX_MODE_CCK] = 0,
1213+ [MT76_TM_TX_MODE_OFDM] = 0,
1214+ [MT76_TM_TX_MODE_HT] = 1,
1215+ [MT76_TM_TX_MODE_VHT] = 2,
1216+ [MT76_TM_TX_MODE_HE_SU] = 3,
1217+ [MT76_TM_TX_MODE_HE_EXT_SU] = 3,
1218+ [MT76_TM_TX_MODE_HE_TB] = 3,
1219+ [MT76_TM_TX_MODE_HE_MU] = 3,
1220+ };
1221+ struct mt76_testmode_data *td = &phy->mt76->test;
1222+ struct mt76_wcid *wcid;
1223+ struct ieee80211_vif *vif = phy->monitor_vif;
1224+ struct mt7915_tm_pfmu_tag *tag = phy->dev->test.txbf_pfmu_tag;
1225+ u8 pfmu_idx = val[0], nc = val[2], nr;
1226+ int ret;
1227+
1228+ if (td->tx_antenna_mask == 3)
1229+ nr = 1;
1230+ else if (td->tx_antenna_mask == 7)
1231+ nr = 2;
1232+ else
1233+ nr = 3;
1234+
1235+ memset(tag, 0, sizeof(*tag));
1236+ tag->t1.pfmu_idx = pfmu_idx;
1237+ tag->t1.ebf = ebf;
1238+ tag->t1.nr = nr;
1239+ tag->t1.nc = nc;
1240+ tag->t1.invalid_prof = true;
1241+
1242+ tag->t1.snr_sts4 = 0xc0;
1243+ tag->t1.snr_sts5 = 0xff;
1244+ tag->t1.snr_sts6 = 0xff;
1245+ tag->t1.snr_sts7 = 0xff;
1246+
1247+ if (ebf) {
1248+ tag->t1.row_id1 = 0;
1249+ tag->t1.row_id2 = 1;
1250+ tag->t1.row_id3 = 2;
1251+ tag->t1.row_id4 = 3;
1252+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_HT];
1253+ } else {
1254+ tag->t1.row_id1 = 4;
1255+ tag->t1.row_id2 = 5;
1256+ tag->t1.row_id3 = 6;
1257+ tag->t1.row_id4 = 7;
1258+ tag->t1.lm = mode_to_lm[MT76_TM_TX_MODE_OFDM];
1259+
1260+ tag->t2.ibf_timeout = 0xff;
1261+ tag->t2.ibf_nr = nr;
1262+ }
1263+
1264+ ret = mt7915_tm_txbf_profile_tag_write(phy, pfmu_idx, tag);
1265+ if (ret)
1266+ return ret;
1267+
1268+ wcid = list_first_entry(&td->tm_entry_list, struct mt76_wcid, list);
1269+ ret = mt7915_tm_add_txbf(phy, vif, wcid_to_sta(wcid), pfmu_idx, nr, nc, ebf);
1270+ if (ret)
1271+ return ret;
1272+
1273+ if (!ebf)
1274+ return mt7915_tm_txbf_apply_tx(phy, 1, false, true, true);
1275+
1276+ return 0;
1277+}
1278+
1279+static int
1280+mt7915_tm_txbf_phase_cal(struct mt7915_phy *phy, u16 *val)
1281+{
1282+#define GROUP_L 0
1283+#define GROUP_M 1
1284+#define GROUP_H 2
1285+ struct mt7915_dev *dev = phy->dev;
1286+ struct {
1287+ u8 category;
1288+ u8 group_l_m_n;
1289+ u8 group;
1290+ bool sx2;
1291+ u8 cal_type;
1292+ u8 lna_gain_level;
1293+ u8 _rsv[2];
1294+ } __packed req = {
1295+ .category = MT_BF_PHASE_CAL,
1296+ .group = val[0],
1297+ .group_l_m_n = val[1],
1298+ .sx2 = val[2],
1299+ .cal_type = val[3],
1300+ .lna_gain_level = 0, /* for test purpose */
1301+ };
1302+ struct mt7915_tm_txbf_phase *phase =
1303+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1304+
1305+ phase[req.group].status = 0;
1306+
1307+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
1308+ sizeof(req), true);
1309+}
1310+
1311+int mt7915_tm_txbf_status_read(struct mt7915_dev *dev, struct sk_buff *skb)
1312+{
1313+#define BF_PFMU_TAG 16
1314+#define BF_CAL_PHASE 21
1315+ u8 format_id;
1316+
developer7c3a5082022-06-24 13:40:42 +08001317+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
developer6caa5e22022-06-16 13:33:13 +08001318+ format_id = *(u8 *)skb->data;
1319+
1320+ if (format_id == BF_PFMU_TAG) {
1321+ struct mt7915_tm_pfmu_tag *tag = dev->test.txbf_pfmu_tag;
1322+
1323+ skb_pull(skb, 8);
1324+ memcpy(tag, skb->data, sizeof(struct mt7915_tm_pfmu_tag));
1325+ } else if (format_id == BF_CAL_PHASE) {
1326+ struct mt7915_tm_ibf_cal_info *cal;
1327+ struct mt7915_tm_txbf_phase *phase =
1328+ (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1329+
1330+ cal = (struct mt7915_tm_ibf_cal_info *)skb->data;
1331+ switch (cal->cal_type) {
1332+ case IBF_PHASE_CAL_NORMAL:
1333+ case IBF_PHASE_CAL_NORMAL_INSTRUMENT:
1334+ if (cal->group_l_m_n != GROUP_M)
1335+ break;
1336+ phase = &phase[cal->group];
1337+ memcpy(&phase->phase, cal->buf + 16, sizeof(phase->phase));
1338+ phase->status = cal->status;
1339+ break;
1340+ case IBF_PHASE_CAL_VERIFY:
1341+ case IBF_PHASE_CAL_VERIFY_INSTRUMENT:
1342+ break;
1343+ default:
1344+ break;
1345+ }
1346+ }
1347+
1348+ wake_up(&dev->mt76.tx_wait);
1349+
1350+ return 0;
1351+}
1352+
1353+static int
1354+mt7915_tm_txbf_profile_update_all(struct mt7915_phy *phy, u16 *val)
1355+{
1356+ struct mt76_testmode_data *td = &phy->mt76->test;
1357+ u16 pfmu_idx = val[0];
1358+ u16 subc_id = val[1];
1359+ u16 angle11 = val[2];
1360+ u16 angle21 = val[3];
1361+ u16 angle31 = val[4];
1362+ u16 angle41 = val[5];
1363+ s16 phi11 = 0, phi21 = 0, phi31 = 0;
1364+ struct mt7915_tm_pfmu_data *pfmu_data;
1365+
1366+ if (subc_id > 63)
1367+ return -EINVAL;
1368+
1369+ if (td->tx_antenna_mask == 2) {
1370+ phi11 = (s16)(angle21 - angle11);
1371+ } else if (td->tx_antenna_mask == 3) {
1372+ phi11 = (s16)(angle31 - angle11);
1373+ phi21 = (s16)(angle31 - angle21);
1374+ } else {
1375+ phi11 = (s16)(angle41 - angle11);
1376+ phi21 = (s16)(angle41 - angle21);
1377+ phi31 = (s16)(angle41 - angle31);
1378+ }
1379+
1380+ pfmu_data = (struct mt7915_tm_pfmu_data *)phy->dev->test.txbf_pfmu_data;
1381+ pfmu_data = &pfmu_data[subc_id];
1382+
1383+ if (subc_id < 32)
1384+ pfmu_data->subc_idx = cpu_to_le16(subc_id + 224);
1385+ else
1386+ pfmu_data->subc_idx = cpu_to_le16(subc_id - 32);
1387+ pfmu_data->phi11 = cpu_to_le16(phi11);
1388+ pfmu_data->phi21 = cpu_to_le16(phi21);
1389+ pfmu_data->phi31 = cpu_to_le16(phi31);
1390+
1391+ if (subc_id == 63) {
1392+ struct mt7915_dev *dev = phy->dev;
1393+ struct {
1394+ u8 format_id;
1395+ u8 pfmu_idx;
1396+ u8 dbdc_idx;
1397+ u8 _rsv;
1398+ u8 buf[512];
1399+ } __packed req = {
1400+ .format_id = MT_BF_PROFILE_WRITE_ALL,
1401+ .pfmu_idx = pfmu_idx,
1402+ .dbdc_idx = phy != &dev->phy,
1403+ };
1404+
1405+ memcpy(req.buf, dev->test.txbf_pfmu_data, 512);
1406+
1407+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION),
1408+ &req, sizeof(req), true);
1409+ }
1410+
1411+ return 0;
1412+}
1413+
1414+static int
1415+mt7915_tm_txbf_e2p_update(struct mt7915_phy *phy)
1416+{
1417+ struct mt7915_tm_txbf_phase *phase, *p;
1418+ struct mt7915_dev *dev = phy->dev;
1419+ u8 *eeprom = dev->mt76.eeprom.data;
1420+ u16 offset;
1421+ bool is_7976;
1422+ int i;
1423+
1424+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
1425+ offset = is_7976 ? 0x60a : 0x651;
1426+
1427+ phase = (struct mt7915_tm_txbf_phase *)dev->test.txbf_phase_cal;
1428+ for (i = 0; i < MAX_PHASE_GROUP_NUM; i++) {
1429+ p = &phase[i];
1430+
1431+ if (!p->status)
1432+ continue;
1433+
1434+ /* copy phase cal data to eeprom */
1435+ memcpy(eeprom + offset + i * sizeof(p->phase), &p->phase,
1436+ sizeof(p->phase));
1437+ }
1438+
1439+ return 0;
1440+}
1441+
1442+static int
1443+mt7915_tm_set_txbf(struct mt7915_phy *phy)
1444+{
1445+ struct mt76_testmode_data *td = &phy->mt76->test;
1446+ u16 *val = td->txbf_param;
1447+
1448+ pr_info("ibf cal process: act = %u, val = %u, %u, %u, %u, %u\n",
1449+ td->txbf_act, val[0], val[1], val[2], val[3], val[4]);
1450+
1451+ switch (td->txbf_act) {
1452+ case MT76_TM_TXBF_ACT_INIT:
1453+ return mt7915_tm_txbf_init(phy, val);
1454+ case MT76_TM_TXBF_ACT_UPDATE_CH:
1455+ mt7915_tm_update_channel(phy);
1456+ break;
1457+ case MT76_TM_TXBF_ACT_PHASE_COMP:
1458+ return mt7915_tm_txbf_phase_comp(phy, val);
1459+ case MT76_TM_TXBF_ACT_TX_PREP:
1460+ return mt7915_tm_txbf_set_tx(phy, val);
1461+ case MT76_TM_TXBF_ACT_IBF_PROF_UPDATE:
1462+ return mt7915_tm_txbf_profile_update(phy, val, false);
1463+ case MT76_TM_TXBF_ACT_EBF_PROF_UPDATE:
1464+ return mt7915_tm_txbf_profile_update(phy, val, true);
1465+ case MT76_TM_TXBF_ACT_PHASE_CAL:
1466+ return mt7915_tm_txbf_phase_cal(phy, val);
1467+ case MT76_TM_TXBF_ACT_PROF_UPDATE_ALL:
1468+ return mt7915_tm_txbf_profile_update_all(phy, val);
1469+ case MT76_TM_TXBF_ACT_E2P_UPDATE:
1470+ return mt7915_tm_txbf_e2p_update(phy);
1471+ default:
1472+ break;
1473+ };
1474+
1475+ return 0;
1476+}
1477+
1478 static int
developer7c3a5082022-06-24 13:40:42 +08001479 mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
developer6caa5e22022-06-16 13:33:13 +08001480- u16 cw_max, u16 txop)
1481+ u16 cw_max, u16 txop, u8 tx_cmd)
1482 {
developer7c3a5082022-06-24 13:40:42 +08001483 struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
developer6caa5e22022-06-16 13:33:13 +08001484- struct mt7915_mcu_tx req = { .total = 1 };
1485+ struct mt7915_mcu_tx req = {
1486+ .valid = true,
1487+ .mode = tx_cmd,
1488+ .total = 1,
1489+ };
1490 struct edca *e = &req.edca[0];
1491
developer7c3a5082022-06-24 13:40:42 +08001492 e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
developer692ed9b2023-06-19 12:03:50 +08001493@@ -263,7 +1056,8 @@ done:
developer6caa5e22022-06-16 13:33:13 +08001494
developer7c3a5082022-06-24 13:40:42 +08001495 return mt7915_tm_set_wmm_qid(phy,
developer6caa5e22022-06-16 13:33:13 +08001496 mt76_connac_lmac_mapping(IEEE80211_AC_BE),
1497- aifsn, cw, cw, 0);
1498+ aifsn, cw, cw, 0,
1499+ mode == MT76_TM_TX_MODE_HE_MU);
1500 }
1501
1502 static int
developer692ed9b2023-06-19 12:03:50 +08001503@@ -339,7 +1133,7 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
developer6caa5e22022-06-16 13:33:13 +08001504 bitrate = cfg80211_calculate_bitrate(&rate);
1505 tx_len = bitrate * tx_time / 10 / 8;
1506
1507- ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
1508+ ret = mt76_testmode_init_skb(phy->mt76, tx_len, &td->tx_skb, td->addr);
1509 if (ret)
1510 return ret;
1511
developer692ed9b2023-06-19 12:03:50 +08001512@@ -458,64 +1252,227 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001513
1514 phy->mt76->test.flag |= MT_TM_FW_RX_COUNT;
1515
1516- if (!en)
1517+ if (!en) {
1518 mt7915_tm_set_tam_arb(phy, en, 0);
1519+
1520+ phy->mt76->test.aid = 0;
1521+ phy->mt76->test.tx_mpdu_len = 0;
1522+ phy->test.bf_en = 0;
1523+ mt7915_tm_set_entry(phy);
1524+ }
1525+}
1526+
1527+static bool
1528+mt7915_tm_check_skb(struct mt7915_phy *phy)
1529+{
1530+ struct mt76_testmode_entry_data *ed;
1531+ struct mt76_wcid *wcid;
1532+
1533+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1534+ struct ieee80211_tx_info *info;
1535+
1536+ if (!ed->tx_skb)
1537+ return false;
1538+
1539+ info = IEEE80211_SKB_CB(ed->tx_skb);
1540+ info->control.vif = phy->monitor_vif;
1541+ }
1542+
1543+ return true;
1544+}
1545+
1546+static int
1547+mt7915_tm_set_ba(struct mt7915_phy *phy)
1548+{
1549+ struct mt7915_dev *dev = phy->dev;
1550+ struct mt76_testmode_data *td = &phy->mt76->test;
1551+ struct mt76_wcid *wcid;
1552+ struct ieee80211_vif *vif = phy->monitor_vif;
1553+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1554+ struct ieee80211_ampdu_params params = { .buf_size = 256 };
1555+
1556+ list_for_each_entry(wcid, &td->tm_entry_list, list) {
1557+ int tid, ret;
1558+
1559+ params.sta = wcid_to_sta(wcid);
1560+ for (tid = 0; tid < 8; tid++) {
1561+ params.tid = tid;
1562+ ret = mt7915_mcu_add_tx_ba(phy->dev, &params, true);
1563+ if (ret)
1564+ return ret;
1565+ }
1566+ }
1567+
1568+ mt76_wr(dev, MT_AGG_AALCR0(mvif->mt76.band_idx, mvif->mt76.wmm_idx),
1569+ 0x01010101);
1570+
1571+ return 0;
1572+}
1573+
1574+static int
1575+mt7915_tm_set_muru_cfg(struct mt7915_phy *phy, struct mt7915_tm_muru *muru)
1576+{
1577+/* #define MURU_SET_MANUAL_CFG 100 */
1578+ struct mt7915_dev *dev = phy->dev;
1579+ struct {
1580+ __le32 cmd;
1581+ struct mt7915_tm_muru muru;
1582+ } __packed req = {
1583+ .cmd = cpu_to_le32(MURU_SET_MANUAL_CFG),
1584+ };
1585+
1586+ memcpy(&req.muru, muru, sizeof(struct mt7915_tm_muru));
1587+
1588+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1589+ sizeof(req), false);
1590+}
1591+
1592+static int
1593+mt7915_tm_set_muru_dl(struct mt7915_phy *phy)
1594+{
1595+ struct mt76_testmode_data *td = &phy->mt76->test;
1596+ struct mt76_testmode_entry_data *ed;
1597+ struct mt76_wcid *wcid;
1598+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1599+ struct ieee80211_vif *vif = phy->monitor_vif;
1600+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
1601+ struct mt7915_tm_muru muru = {};
1602+ struct mt7915_tm_muru_comm *comm = &muru.comm;
1603+ struct mt7915_tm_muru_dl *dl = &muru.dl;
1604+ int i;
1605+
1606+ comm->ppdu_format = MURU_PPDU_HE_MU;
1607+ comm->band = mvif->mt76.band_idx;
1608+ comm->wmm_idx = mvif->mt76.wmm_idx;
1609+ comm->spe_idx = phy->test.spe_idx;
1610+
1611+ dl->bw = mt7915_tm_chan_bw(chandef->width);
developer692ed9b2023-06-19 12:03:50 +08001612+ dl->gi = td->tx_rate_sgi;
developer6caa5e22022-06-16 13:33:13 +08001613+ dl->ltf = td->tx_ltf;
1614+ dl->tx_mode = MT_PHY_TYPE_HE_MU;
1615+
1616+ for (i = 0; i < sizeof(dl->ru); i++)
1617+ dl->ru[i] = 0x71;
1618+
1619+ mt76_tm_for_each_entry(phy->mt76, wcid, ed) {
1620+ struct mt7915_tm_muru_dl_usr *dl_usr = &dl->usr[dl->user_num];
1621+
1622+ dl_usr->wlan_idx = cpu_to_le16(wcid->idx);
1623+ dl_usr->ru_alloc_seg = ed->aid < 8 ? 0 : 1;
1624+ dl_usr->ru_idx = ed->ru_idx;
1625+ dl_usr->mcs = ed->tx_rate_idx;
1626+ dl_usr->nss = ed->tx_rate_nss - 1;
1627+ dl_usr->ldpc = ed->tx_rate_ldpc;
1628+ dl->ru[dl->user_num] = ed->ru_alloc;
1629+
1630+ dl->user_num++;
1631+ }
1632+
developer1475cf22023-05-05 13:45:43 +08001633+ muru.cfg_comm = cpu_to_le32(MURU_COMM_SET_TM);
developer6caa5e22022-06-16 13:33:13 +08001634+ muru.cfg_dl = cpu_to_le32(MURU_DL_SET);
1635+
1636+ return mt7915_tm_set_muru_cfg(phy, &muru);
1637+}
1638+
1639+static int
1640+mt7915_tm_set_muru_pkt_cnt(struct mt7915_phy *phy, bool enable, u32 tx_count)
1641+{
1642+#define MURU_SET_TX_PKT_CNT 105
1643+#define MURU_SET_TX_EN 106
1644+ struct mt7915_dev *dev = phy->dev;
1645+ struct {
1646+ __le32 cmd;
1647+ u8 band;
1648+ u8 enable;
1649+ u8 _rsv[2];
1650+ __le32 tx_count;
1651+ } __packed req = {
developer17bb0a82022-12-13 15:52:04 +08001652+ .band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +08001653+ .enable = enable,
1654+ .tx_count = enable ? cpu_to_le32(tx_count) : 0,
1655+ };
1656+ int ret;
1657+
1658+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_PKT_CNT) :
1659+ cpu_to_le32(MURU_SET_TX_EN);
1660+
1661+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1662+ sizeof(req), false);
1663+ if (ret)
1664+ return ret;
1665+
1666+ req.cmd = enable ? cpu_to_le32(MURU_SET_TX_EN) :
1667+ cpu_to_le32(MURU_SET_TX_PKT_CNT);
1668+
1669+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
1670+ sizeof(req), false);
1671 }
1672
1673 static void
1674-mt7915_tm_update_channel(struct mt7915_phy *phy)
1675+mt7915_tm_tx_frames_mu(struct mt7915_phy *phy, bool enable)
1676 {
1677- mutex_unlock(&phy->dev->mt76.mutex);
1678- mt7915_set_channel(phy);
1679- mutex_lock(&phy->dev->mt76.mutex);
1680+ struct mt76_testmode_data *td = &phy->mt76->test;
1681
1682- mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
1683+ if (enable) {
1684+ struct mt7915_dev *dev = phy->dev;
1685+
1686+ mt7915_tm_set_ba(phy);
1687+ mt7915_tm_set_muru_dl(phy);
1688+ mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1689+ } else {
1690+		/* reset to zero so tx_done reflects only the real tx-free count */
1691+ td->tx_done = 0;
1692+ }
1693+
1694+ mt7915_tm_set_muru_pkt_cnt(phy, enable, td->tx_count);
1695+ usleep_range(100000, 200000);
1696 }
1697
1698 static void
developer072c5612022-07-15 18:30:03 +08001699 mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
1700 {
developer6caa5e22022-06-16 13:33:13 +08001701 struct mt76_testmode_data *td = &phy->mt76->test;
1702- struct mt7915_dev *dev = phy->dev;
1703- struct ieee80211_tx_info *info;
1704- u8 duty_cycle = td->tx_duty_cycle;
1705- u32 tx_time = td->tx_time;
1706- u32 ipg = td->tx_ipg;
1707
1708 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1709- mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
1710+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1711
1712 if (en) {
1713- mt7915_tm_update_channel(phy);
1714+ u32 tx_time = td->tx_time, ipg = td->tx_ipg;
1715+ u8 duty_cycle = td->tx_duty_cycle;
1716+
1717+ if (!phy->test.bf_en)
1718+ mt7915_tm_update_channel(phy);
1719
developer072c5612022-07-15 18:30:03 +08001720 if (td->tx_spe_idx)
developer6caa5e22022-06-16 13:33:13 +08001721 phy->test.spe_idx = td->tx_spe_idx;
developer072c5612022-07-15 18:30:03 +08001722 else
1723 phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
developer6caa5e22022-06-16 13:33:13 +08001724- }
1725
1726- mt7915_tm_set_tam_arb(phy, en,
1727- td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1728+ /* if all three params are set, duty_cycle will be ignored */
1729+ if (duty_cycle && tx_time && !ipg) {
1730+ ipg = tx_time * 100 / duty_cycle - tx_time;
1731+ } else if (duty_cycle && !tx_time && ipg) {
1732+ if (duty_cycle < 100)
1733+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
1734+ }
1735
1736- /* if all three params are set, duty_cycle will be ignored */
1737- if (duty_cycle && tx_time && !ipg) {
1738- ipg = tx_time * 100 / duty_cycle - tx_time;
1739- } else if (duty_cycle && !tx_time && ipg) {
1740- if (duty_cycle < 100)
1741- tx_time = duty_cycle * ipg / (100 - duty_cycle);
1742- }
1743+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1744+ mt7915_tm_set_tx_len(phy, tx_time);
1745
1746- mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
1747- mt7915_tm_set_tx_len(phy, tx_time);
1748+ if (ipg)
1749+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1750
1751- if (ipg)
1752- td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
1753+ if (!mt7915_tm_check_skb(phy))
1754+ return;
1755+ } else {
1756+ mt7915_tm_clean_hwq(phy);
1757+ }
1758
1759- if (!en || !td->tx_skb)
1760- return;
1761+ mt7915_tm_set_tam_arb(phy, en,
1762+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
1763
1764- info = IEEE80211_SKB_CB(td->tx_skb);
1765- info->control.vif = phy->monitor_vif;
1766+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1767+ mt7915_tm_tx_frames_mu(phy, en);
1768
1769 mt7915_tm_set_trx(phy, TM_MAC_TX, en);
1770 }
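
A quick numeric check of the duty-cycle arithmetic in mt7915_tm_set_tx_frames() above (illustrative values only, not taken from the patch): with duty_cycle = 25 and tx_time = 1500 us, the first branch gives ipg = 1500 * 100 / 25 - 1500 = 4500 us, i.e. an air time of 1500 / (1500 + 4500) = 25%; with duty_cycle = 25 and ipg = 4500 us instead, the second branch gives tx_time = 25 * 4500 / (100 - 25) = 1500 us, the same operating point.
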
developer692ed9b2023-06-19 12:03:50 +08001771@@ -544,10 +1501,6 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer6caa5e22022-06-16 13:33:13 +08001772 return ret;
1773
1774 rs_band = (struct mt7915_tm_rx_stat_band *)skb->data;
1775- /* pr_info("mdrdy_cnt = %d\n", le32_to_cpu(rs_band->mdrdy_cnt)); */
1776- /* pr_info("fcs_err = %d\n", le16_to_cpu(rs_band->fcs_err)); */
1777- /* pr_info("len_mismatch = %d\n", le16_to_cpu(rs_band->len_mismatch)); */
1778- /* pr_info("fcs_ok = %d\n", le16_to_cpu(rs_band->fcs_succ)); */
1779
1780 if (!clear) {
developerf7a3ca32022-09-01 14:44:55 +08001781 enum mt76_rxq_id q = req.band ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
developer692ed9b2023-06-19 12:03:50 +08001782@@ -562,13 +1515,61 @@ mt7915_tm_get_rx_stats(struct mt7915_phy *phy, bool clear)
developer6caa5e22022-06-16 13:33:13 +08001783 return 0;
1784 }
1785
1786+static int
1787+mt7915_tm_set_rx_user_idx(struct mt7915_phy *phy, u8 aid)
1788+{
1789+ struct mt7915_dev *dev = phy->dev;
1790+ struct mt76_wcid *wcid = NULL;
1791+ struct mt76_testmode_entry_data *ed;
1792+ struct {
1793+ u8 band;
1794+ u8 _rsv;
1795+ __le16 wlan_idx;
1796+ } __packed req = {
developer17bb0a82022-12-13 15:52:04 +08001797+ .band = phy->mt76->band_idx,
developer6caa5e22022-06-16 13:33:13 +08001798+ };
1799+
1800+ mt76_tm_for_each_entry(phy->mt76, wcid, ed)
1801+ if (ed->aid == aid)
1802+ break;
1803+
1804+ if (!wcid)
1805+ return -EINVAL;
1806+
1807+ req.wlan_idx = cpu_to_le16(wcid->idx);
1808+
1809+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_STAT_USER_CTRL),
1810+ &req, sizeof(req), false);
1811+}
1812+
1813+static int
1814+mt7915_tm_set_muru_aid(struct mt7915_phy *phy, u16 aid)
1815+{
1816+ struct mt7915_dev *dev = phy->dev;
1817+ struct mt7915_tm_cmd req = {
1818+ .testmode_en = 1,
1819+ .param_idx = MCU_ATE_SET_MU_RX_AID,
developer17bb0a82022-12-13 15:52:04 +08001820+ .param.rx_aid.band = cpu_to_le32(phy->mt76->band_idx),
developer6caa5e22022-06-16 13:33:13 +08001821+ .param.rx_aid.aid = cpu_to_le16(aid),
1822+ };
1823+
1824+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
1825+ sizeof(req), false);
1826+}
1827+
1828 static void
1829 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
1830 {
1831+ struct mt76_testmode_data *td = &phy->mt76->test;
1832+
1833+ mt7915_tm_set_trx(phy, TM_MAC_TX, false);
1834 mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
1835
1836 if (en) {
1837- mt7915_tm_update_channel(phy);
1838+ if (!phy->test.bf_en)
1839+ mt7915_tm_update_channel(phy);
1840+ if (td->aid)
1841+ mt7915_tm_set_rx_user_idx(phy, td->aid);
1842
1843 /* read-clear */
1844 mt7915_tm_get_rx_stats(phy, true);
developer692ed9b2023-06-19 12:03:50 +08001845@@ -576,9 +1577,12 @@ mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001846 /* clear fw count */
1847 mt7915_tm_set_phy_count(phy, 0);
1848 mt7915_tm_set_phy_count(phy, 1);
1849-
1850- mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1851 }
1852+
1853+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1854+ mt7915_tm_set_muru_aid(phy, en ? td->aid : 0xf800);
1855+
1856+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
1857 }
1858
1859 static int
developer692ed9b2023-06-19 12:03:50 +08001860@@ -617,34 +1621,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
developer6caa5e22022-06-16 13:33:13 +08001861 tx_cont->tx_ant = td->tx_antenna_mask;
developer17bb0a82022-12-13 15:52:04 +08001862 tx_cont->band = band;
developer3609d782022-11-29 18:07:22 +08001863
developer6caa5e22022-06-16 13:33:13 +08001864- switch (chandef->width) {
1865- case NL80211_CHAN_WIDTH_40:
1866- tx_cont->bw = CMD_CBW_40MHZ;
1867- break;
1868- case NL80211_CHAN_WIDTH_80:
1869- tx_cont->bw = CMD_CBW_80MHZ;
1870- break;
1871- case NL80211_CHAN_WIDTH_80P80:
1872- tx_cont->bw = CMD_CBW_8080MHZ;
1873- break;
1874- case NL80211_CHAN_WIDTH_160:
1875- tx_cont->bw = CMD_CBW_160MHZ;
1876- break;
1877- case NL80211_CHAN_WIDTH_5:
1878- tx_cont->bw = CMD_CBW_5MHZ;
1879- break;
1880- case NL80211_CHAN_WIDTH_10:
1881- tx_cont->bw = CMD_CBW_10MHZ;
1882- break;
1883- case NL80211_CHAN_WIDTH_20:
1884- tx_cont->bw = CMD_CBW_20MHZ;
1885- break;
1886- case NL80211_CHAN_WIDTH_20_NOHT:
1887- tx_cont->bw = CMD_CBW_20MHZ;
1888- break;
1889- default:
1890- return -EINVAL;
1891- }
1892+ tx_cont->bw = mt7915_tm_chan_bw(chandef->width);
1893
1894 if (!en) {
developer17bb0a82022-12-13 15:52:04 +08001895 req.op.rf.param.func_data = cpu_to_le32(band);
developer692ed9b2023-06-19 12:03:50 +08001896@@ -728,6 +1705,12 @@ mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
developer6caa5e22022-06-16 13:33:13 +08001897 mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
1898 if (changed & BIT(TM_CHANGED_TXPOWER))
1899 mt7915_tm_set_tx_power(phy);
1900+ if (changed & BIT(TM_CHANGED_AID))
1901+ mt7915_tm_set_entry(phy);
1902+ if (changed & BIT(TM_CHANGED_CFG))
1903+ mt7915_tm_set_cfg(phy);
1904+ if (changed & BIT(TM_CHANGED_TXBF_ACT))
1905+ mt7915_tm_set_txbf(phy);
1906 }
1907
1908 static int
developer692ed9b2023-06-19 12:03:50 +08001909@@ -737,6 +1720,11 @@ mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
developerbb6ddff2023-03-08 17:22:32 +08001910 struct mt7915_phy *phy = mphy->priv;
1911 enum mt76_testmode_state prev_state = td->state;
1912
1913+ if (!phy->monitor_vif) {
1914+		dev_err(phy->dev->mt76.dev, "Please make sure the monitor interface is up\n");
1915+ return -ENOTCONN;
1916+ }
1917+
1918 mphy->test.state = state;
1919
1920 if (prev_state == MT76_TM_STATE_TX_FRAMES ||
developer692ed9b2023-06-19 12:03:50 +08001921@@ -757,7 +1745,7 @@ mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
1922 (state == MT76_TM_STATE_OFF &&
1923 prev_state == MT76_TM_STATE_IDLE)) {
1924 u32 changed = 0;
1925- int i;
1926+ int i, ret;
1927
1928 for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
1929 u16 cur = tm_change_map[i];
1930@@ -766,6 +1754,10 @@ mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
1931 changed |= BIT(i);
1932 }
1933
1934+ ret = mt7915_tm_check_antenna(phy);
1935+ if (ret)
1936+ return ret;
1937+
1938 mt7915_tm_update_params(phy, changed);
1939 }
1940
1941@@ -778,10 +1770,8 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
1942 {
1943 struct mt76_testmode_data *td = &mphy->test;
1944 struct mt7915_phy *phy = mphy->priv;
1945- struct mt7915_dev *dev = phy->dev;
1946- u32 chainmask = mphy->chainmask, changed = 0;
1947- bool ext_phy = phy != &dev->phy;
1948- int i;
1949+ u32 changed = 0;
1950+ int i, ret;
1951
1952 BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
1953
1954@@ -789,9 +1779,9 @@ mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
1955 td->state == MT76_TM_STATE_OFF)
1956 return 0;
1957
1958- chainmask = ext_phy ? chainmask >> dev->chainshift : chainmask;
1959- if (td->tx_antenna_mask > chainmask)
1960- return -EINVAL;
1961+ ret = mt7915_tm_check_antenna(phy);
1962+ if (ret)
1963+ return ret;
1964
1965 for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
1966 if (tb[tm_change_map[i]])
1967@@ -807,6 +1797,7 @@ static int
developer6caa5e22022-06-16 13:33:13 +08001968 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
1969 {
1970 struct mt7915_phy *phy = mphy->priv;
1971+ struct mt7915_dev *dev = phy->dev;
1972 void *rx, *rssi;
1973 int i;
1974
developer692ed9b2023-06-19 12:03:50 +08001975@@ -852,11 +1843,75 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
developer6caa5e22022-06-16 13:33:13 +08001976
1977 nla_nest_end(msg, rx);
1978
1979+ if (mphy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
1980+ mphy->test.tx_done += mt76_rr(dev, MT_MIB_DR8(phy != &dev->phy));
1981+
1982 return mt7915_tm_get_rx_stats(phy, false);
1983 }
1984
1985+static int
1986+mt7915_tm_write_back_to_efuse(struct mt7915_dev *dev)
1987+{
1988+ struct mt7915_mcu_eeprom_info req = {};
developerc9333e12023-04-06 18:07:42 +08001989+ u8 read_buf[MT76_TM_EEPROM_BLOCK_SIZE], *eeprom = dev->mt76.eeprom.data;
developer6caa5e22022-06-16 13:33:13 +08001990+ int i, ret = -EINVAL;
1991+
1992+ /* prevent from damaging chip id in efuse */
1993+ if (mt76_chip(&dev->mt76) != get_unaligned_le16(eeprom))
1994+ goto out;
1995+
1996+ for (i = 0; i < mt7915_eeprom_size(dev); i += MT76_TM_EEPROM_BLOCK_SIZE) {
1997+ req.addr = cpu_to_le32(i);
developerc9333e12023-04-06 18:07:42 +08001998+ memcpy(req.data, eeprom + i, MT76_TM_EEPROM_BLOCK_SIZE);
1999+
2000+ ret = mt7915_mcu_get_eeprom(dev, i, read_buf);
2001+ if (ret < 0)
2002+ return ret;
2003+
2004+ if (!memcmp(req.data, read_buf, MT76_TM_EEPROM_BLOCK_SIZE))
2005+ continue;
developer6caa5e22022-06-16 13:33:13 +08002006+
2007+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_ACCESS),
2008+ &req, sizeof(req), true);
2009+ if (ret)
2010+ return ret;
2011+ }
2012+
2013+out:
2014+ return ret;
2015+}
2016+
2017+static int
2018+mt7915_tm_set_eeprom(struct mt76_phy *mphy, u32 offset, u8 *val, u8 action)
2019+{
2020+ struct mt7915_phy *phy = mphy->priv;
2021+ struct mt7915_dev *dev = phy->dev;
2022+ u8 *eeprom = dev->mt76.eeprom.data;
2023+ int ret = 0;
2024+
2025+ if (offset >= mt7915_eeprom_size(dev))
2026+ return -EINVAL;
2027+
2028+ switch (action) {
2029+ case MT76_TM_EEPROM_ACTION_UPDATE_DATA:
2030+ memcpy(eeprom + offset, val, MT76_TM_EEPROM_BLOCK_SIZE);
2031+ break;
2032+ case MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE:
2033+ ret = mt7915_mcu_set_eeprom(dev, true);
2034+ break;
2035+ case MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE:
2036+ ret = mt7915_tm_write_back_to_efuse(dev);
2037+ break;
2038+ default:
2039+ break;
2040+ }
2041+
2042+ return ret;
2043+}
2044+
2045 const struct mt76_testmode_ops mt7915_testmode_ops = {
2046 .set_state = mt7915_tm_set_state,
2047 .set_params = mt7915_tm_set_params,
2048 .dump_stats = mt7915_tm_dump_stats,
2049+ .set_eeprom = mt7915_tm_set_eeprom,
2050 };
2051diff --git a/mt7915/testmode.h b/mt7915/testmode.h
developer692ed9b2023-06-19 12:03:50 +08002052index a1c54c8..eb0e043 100644
developer6caa5e22022-06-16 13:33:13 +08002053--- a/mt7915/testmode.h
2054+++ b/mt7915/testmode.h
2055@@ -4,6 +4,8 @@
2056 #ifndef __MT7915_TESTMODE_H
2057 #define __MT7915_TESTMODE_H
2058
2059+#include "mcu.h"
2060+
2061 struct mt7915_tm_trx {
2062 u8 type;
2063 u8 enable;
2064@@ -39,6 +41,11 @@ struct mt7915_tm_cfg {
2065 u8 _rsv[2];
2066 };
2067
2068+struct mt7915_tm_mu_rx_aid {
2069+ __le32 band;
2070+ __le16 aid;
2071+};
2072+
2073 struct mt7915_tm_cmd {
2074 u8 testmode_en;
2075 u8 param_idx;
2076@@ -50,6 +57,7 @@ struct mt7915_tm_cmd {
2077 struct mt7915_tm_slot_time slot;
2078 struct mt7915_tm_clean_txq clean;
2079 struct mt7915_tm_cfg cfg;
2080+ struct mt7915_tm_mu_rx_aid rx_aid;
2081 u8 test[72];
2082 } param;
2083 } __packed;
2084@@ -109,6 +117,16 @@ enum {
2085 TAM_ARB_OP_MODE_FORCE_SU = 5,
2086 };
2087
2088+enum {
2089+ TM_CBW_20MHZ,
2090+ TM_CBW_40MHZ,
2091+ TM_CBW_80MHZ,
2092+ TM_CBW_10MHZ,
2093+ TM_CBW_5MHZ,
2094+ TM_CBW_160MHZ,
2095+ TM_CBW_8080MHZ,
2096+};
2097+
2098 struct mt7915_tm_rx_stat_band {
2099 u8 category;
2100
2101@@ -130,4 +148,264 @@ struct mt7915_tm_rx_stat_band {
2102 __le16 mdrdy_cnt_ofdm;
2103 };
2104
2105+struct mt7915_tm_muru_comm {
2106+ u8 ppdu_format;
2107+ u8 sch_type;
2108+ u8 band;
2109+ u8 wmm_idx;
2110+ u8 spe_idx;
2111+ u8 proc_type;
2112+};
2113+
2114+struct mt7915_tm_muru_dl_usr {
2115+ __le16 wlan_idx;
2116+ u8 ru_alloc_seg;
2117+ u8 ru_idx;
2118+ u8 ldpc;
2119+ u8 nss;
2120+ u8 mcs;
2121+ u8 mu_group_idx;
2122+ u8 vht_groud_id;
2123+ u8 vht_up;
2124+ u8 he_start_stream;
2125+ u8 he_mu_spatial;
2126+ u8 ack_policy;
2127+ __le16 tx_power_alpha;
2128+};
2129+
2130+struct mt7915_tm_muru_dl {
2131+ u8 user_num;
2132+ u8 tx_mode;
2133+ u8 bw;
2134+ u8 gi;
2135+ u8 ltf;
2136+ /* sigB */
2137+ u8 mcs;
2138+ u8 dcm;
2139+ u8 cmprs;
2140+
2141+ u8 tx_power;
2142+ u8 ru[8];
2143+ u8 c26[2];
2144+ u8 ack_policy;
2145+
2146+ struct mt7915_tm_muru_dl_usr usr[16];
2147+};
2148+
2149+struct mt7915_tm_muru_ul_usr {
2150+ __le16 wlan_idx;
2151+ u8 ru_alloc;
2152+ u8 ru_idx;
2153+ u8 ldpc;
2154+ u8 nss;
2155+ u8 mcs;
2156+ u8 target_rssi;
2157+ __le32 trig_pkt_size;
2158+};
2159+
2160+struct mt7915_tm_muru_ul {
2161+ u8 user_num;
2162+
2163+ /* UL TX */
2164+ u8 trig_type;
2165+ __le16 trig_cnt;
2166+ __le16 trig_intv;
2167+ u8 bw;
2168+ u8 gi_ltf;
2169+ __le16 ul_len;
2170+ u8 pad;
2171+ u8 trig_ta[ETH_ALEN];
2172+ u8 ru[8];
2173+ u8 c26[2];
2174+
2175+ struct mt7915_tm_muru_ul_usr usr[16];
2176+ /* HE TB RX Debug */
2177+ __le32 rx_hetb_nonsf_en_bitmap;
2178+ __le32 rx_hetb_cfg[2];
2179+
2180+ /* DL TX */
2181+ u8 ba_type;
2182+};
2183+
2184+struct mt7915_tm_muru {
2185+ __le32 cfg_comm;
2186+ __le32 cfg_dl;
2187+ __le32 cfg_ul;
2188+
2189+ struct mt7915_tm_muru_comm comm;
2190+ struct mt7915_tm_muru_dl dl;
2191+ struct mt7915_tm_muru_ul ul;
2192+};
2193+
2194+#define MURU_PPDU_HE_MU BIT(3)
2195+
2196+/* Common Config */
2197+/* #define MURU_COMM_PPDU_FMT BIT(0) */
2198+/* #define MURU_COMM_SCH_TYPE BIT(1) */
developer692ed9b2023-06-19 12:03:50 +08002199+/* #define MURU_COMM_BAND BIT(2) */
2200+/* #define MURU_COMM_WMM BIT(3) */
developer6caa5e22022-06-16 13:33:13 +08002201+/* #define MURU_COMM_SPE_IDX BIT(4) */
2202+/* #define MURU_COMM_PROC_TYPE BIT(5) */
2203+/* #define MURU_COMM_SET (MURU_COMM_PPDU_FMT | MURU_COMM_BAND | \ */
developer692ed9b2023-06-19 12:03:50 +08002204+/* MURU_COMM_WMM | MURU_COMM_SPE_IDX) */
developer6caa5e22022-06-16 13:33:13 +08002205+/* DL Config */
2206+#define MURU_DL_BW BIT(0)
2207+#define MURU_DL_GI BIT(1)
2208+#define MURU_DL_TX_MODE BIT(2)
2209+#define MURU_DL_TONE_PLAN BIT(3)
2210+#define MURU_DL_USER_CNT BIT(4)
2211+#define MURU_DL_LTF BIT(5)
2212+#define MURU_DL_SIGB_MCS BIT(6)
2213+#define MURU_DL_SIGB_DCM BIT(7)
2214+#define MURU_DL_SIGB_CMPRS BIT(8)
2215+#define MURU_DL_ACK_POLICY BIT(9)
2216+#define MURU_DL_TXPOWER BIT(10)
2217+/* DL Per User Config */
2218+#define MURU_DL_USER_WLAN_ID BIT(16)
2219+#define MURU_DL_USER_COD BIT(17)
2220+#define MURU_DL_USER_MCS BIT(18)
2221+#define MURU_DL_USER_NSS BIT(19)
2222+#define MURU_DL_USER_RU_ALLOC BIT(20)
2223+#define MURU_DL_USER_MUMIMO_GRP BIT(21)
2224+#define MURU_DL_USER_MUMIMO_VHT BIT(22)
2225+#define MURU_DL_USER_ACK_POLICY BIT(23)
2226+#define MURU_DL_USER_MUMIMO_HE BIT(24)
2227+#define MURU_DL_USER_PWR_ALPHA BIT(25)
2228+#define MURU_DL_SET (GENMASK(7, 0) | GENMASK(20, 16) | BIT(25))
2229+
2230+#define MAX_PHASE_GROUP_NUM 9
2231+
2232+struct mt7915_tm_txbf_phase {
2233+ u8 status;
2234+ struct {
2235+ u8 r0_uh;
2236+ u8 r0_h;
2237+ u8 r0_m;
2238+ u8 r0_l;
2239+ u8 r0_ul;
2240+ u8 r1_uh;
2241+ u8 r1_h;
2242+ u8 r1_m;
2243+ u8 r1_l;
2244+ u8 r1_ul;
2245+ u8 r2_uh;
2246+ u8 r2_h;
2247+ u8 r2_m;
2248+ u8 r2_l;
2249+ u8 r2_ul;
2250+ u8 r3_uh;
2251+ u8 r3_h;
2252+ u8 r3_m;
2253+ u8 r3_l;
2254+ u8 r3_ul;
2255+ u8 r2_uh_sx2;
2256+ u8 r2_h_sx2;
2257+ u8 r2_m_sx2;
2258+ u8 r2_l_sx2;
2259+ u8 r2_ul_sx2;
2260+ u8 r3_uh_sx2;
2261+ u8 r3_h_sx2;
2262+ u8 r3_m_sx2;
2263+ u8 r3_l_sx2;
2264+ u8 r3_ul_sx2;
2265+ u8 m_t0_h;
2266+ u8 m_t1_h;
2267+ u8 m_t2_h;
2268+ u8 m_t2_h_sx2;
2269+ u8 r0_reserved;
2270+ u8 r1_reserved;
2271+ u8 r2_reserved;
2272+ u8 r3_reserved;
2273+ u8 r2_sx2_reserved;
2274+ u8 r3_sx2_reserved;
2275+ } phase;
2276+};
2277+
2278+struct mt7915_tm_pfmu_tag1 {
2279+ __le32 pfmu_idx:10;
2280+ __le32 ebf:1;
2281+ __le32 data_bw:2;
2282+ __le32 lm:2;
2283+ __le32 is_mu:1;
2284+ __le32 nr:3, nc:3;
2285+ __le32 codebook:2;
2286+ __le32 ngroup:2;
2287+ __le32 _rsv:2;
2288+ __le32 invalid_prof:1;
2289+ __le32 rmsd:3;
2290+
2291+ __le32 col_id1:6, row_id1:10;
2292+ __le32 col_id2:6, row_id2:10;
2293+ __le32 col_id3:6, row_id3:10;
2294+ __le32 col_id4:6, row_id4:10;
2295+
2296+ __le32 ru_start_id:7;
2297+ __le32 _rsv1:1;
2298+ __le32 ru_end_id:7;
2299+ __le32 _rsv2:1;
2300+ __le32 mob_cal_en:1;
2301+ __le32 _rsv3:15;
2302+
2303+ __le32 snr_sts0:8, snr_sts1:8, snr_sts2:8, snr_sts3:8;
2304+ __le32 snr_sts4:8, snr_sts5:8, snr_sts6:8, snr_sts7:8;
2305+
2306+ __le32 _rsv4;
2307+} __packed;
2308+
2309+struct mt7915_tm_pfmu_tag2 {
2310+ __le32 smart_ant:24;
2311+ __le32 se_idx:5;
2312+ __le32 _rsv:3;
2313+
2314+ __le32 _rsv1:8;
2315+ __le32 rmsd_thres:3;
2316+ __le32 _rsv2:5;
2317+ __le32 ibf_timeout:8;
2318+ __le32 _rsv3:8;
2319+
2320+ __le32 _rsv4:16;
2321+ __le32 ibf_data_bw:2;
2322+ __le32 ibf_nc:3;
2323+ __le32 ibf_nr:3;
2324+ __le32 ibf_ru:8;
2325+
2326+ __le32 mob_delta_t:8;
2327+ __le32 mob_lq_result:7;
2328+ __le32 _rsv5:1;
2329+ __le32 _rsv6:16;
2330+
2331+ __le32 _rsv7;
2332+} __packed;
2333+
2334+struct mt7915_tm_pfmu_tag {
2335+ struct mt7915_tm_pfmu_tag1 t1;
2336+ struct mt7915_tm_pfmu_tag2 t2;
2337+};
2338+
2339+struct mt7915_tm_pfmu_data {
2340+ __le16 subc_idx;
2341+ __le16 phi11;
2342+ __le16 phi21;
2343+ __le16 phi31;
2344+};
2345+
2346+struct mt7915_tm_ibf_cal_info {
2347+ u8 format_id;
2348+ u8 group_l_m_n;
2349+ u8 group;
2350+ bool sx2;
2351+ u8 status;
2352+ u8 cal_type;
2353+ u8 _rsv[2];
2354+ u8 buf[1000];
2355+} __packed;
2356+
2357+enum {
2358+ IBF_PHASE_CAL_UNSPEC,
2359+ IBF_PHASE_CAL_NORMAL,
2360+ IBF_PHASE_CAL_VERIFY,
2361+ IBF_PHASE_CAL_NORMAL_INSTRUMENT,
2362+ IBF_PHASE_CAL_VERIFY_INSTRUMENT,
2363+};
2364+
2365 #endif
2366diff --git a/testmode.c b/testmode.c
developer692ed9b2023-06-19 12:03:50 +08002367index 1d0d5d3..97f65fd 100644
developer6caa5e22022-06-16 13:33:13 +08002368--- a/testmode.c
2369+++ b/testmode.c
developer072c5612022-07-15 18:30:03 +08002370@@ -27,28 +27,16 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
developer6caa5e22022-06-16 13:33:13 +08002371 };
2372 EXPORT_SYMBOL_GPL(mt76_tm_policy);
2373
2374-void mt76_testmode_tx_pending(struct mt76_phy *phy)
2375+static void
2376+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
developerf79ad452022-07-12 11:37:54 +08002377+ struct sk_buff *skb, struct mt76_queue *q, int qid,
2378+ u16 limit)
developer6caa5e22022-06-16 13:33:13 +08002379 {
2380 struct mt76_testmode_data *td = &phy->test;
2381 struct mt76_dev *dev = phy->dev;
2382- struct mt76_wcid *wcid = &dev->global_wcid;
2383- struct sk_buff *skb = td->tx_skb;
2384- struct mt76_queue *q;
2385- u16 tx_queued_limit;
2386- int qid;
2387-
2388- if (!skb || !td->tx_pending)
2389- return;
2390+ u16 count = limit;
2391
2392- qid = skb_get_queue_mapping(skb);
2393- q = phy->q_tx[qid];
2394-
2395- tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
2396-
2397- spin_lock_bh(&q->lock);
2398-
2399- while (td->tx_pending > 0 &&
2400- td->tx_queued - td->tx_done < tx_queued_limit &&
2401+ while (td->tx_pending > 0 && count &&
2402 q->queued < q->ndesc / 2) {
2403 int ret;
2404
developer15c355d2023-03-21 17:28:34 +08002405@@ -57,13 +45,68 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002406 if (ret < 0)
2407 break;
2408
developer15c355d2023-03-21 17:28:34 +08002409- td->tx_pending--;
developer6caa5e22022-06-16 13:33:13 +08002410+ count--;
developer15c355d2023-03-21 17:28:34 +08002411+
2412+ /* tx_count == UINT_MAX for continuous tx */
2413+ if (td->tx_count != UINT_MAX)
2414+ td->tx_pending--;
developer6caa5e22022-06-16 13:33:13 +08002415 td->tx_queued++;
2416+
2417+ if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
developer692ed9b2023-06-19 12:03:50 +08002418+ if (td->tx_queued - td->tx_done >= limit)
2419+ break;
developer6caa5e22022-06-16 13:33:13 +08002420 }
2421
2422 dev->queue_ops->kick(dev, q);
2423+}
2424+
2425+void mt76_testmode_tx_pending(struct mt76_phy *phy)
2426+{
2427+ struct mt76_testmode_data *td = &phy->test;
2428+ struct mt76_testmode_entry_data *ed;
2429+ struct mt76_queue *q;
2430+ int qid;
2431+ u16 tx_queued_limit;
2432+ u32 remain;
2433+ bool is_mu;
2434+
2435+ if (!td->tx_pending)
2436+ return;
2437+
2438+ /* tx_queued_limit = td->tx_queued_limit ?: 100; */
2439+ tx_queued_limit = 100;
2440+
2441+ if (!td->aid) {
2442+ qid = skb_get_queue_mapping(td->tx_skb);
2443+ q = phy->q_tx[qid];
2444+ spin_lock_bh(&q->lock);
2445+ mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
developer692ed9b2023-06-19 12:03:50 +08002446+ td->tx_skb, q, qid, tx_queued_limit);
developer6caa5e22022-06-16 13:33:13 +08002447+ spin_unlock_bh(&q->lock);
2448+
2449+ return;
2450+ }
2451+
2452+ is_mu = td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU;
2453+ ed = mt76_testmode_entry_data(phy, td->cur_entry);
2454+ qid = skb_get_queue_mapping(ed->tx_skb);
2455+ q = phy->q_tx[qid];
2456+
2457+ spin_lock_bh(&q->lock);
2458+
2459+ remain = is_mu ? 1 : (td->tx_pending % td->tx_count) ?: td->tx_count;
2460+ if (remain < tx_queued_limit)
2461+ tx_queued_limit = remain;
2462+
developerf79ad452022-07-12 11:37:54 +08002463+ mt76_testmode_queue_tx(phy, td->cur_entry, ed->tx_skb, q, qid, tx_queued_limit);
developer6caa5e22022-06-16 13:33:13 +08002464+
developer692ed9b2023-06-19 12:03:50 +08002465+ if ((td->tx_count != UINT_MAX && td->tx_pending % td->tx_count == 0) || is_mu)
developer6caa5e22022-06-16 13:33:13 +08002466+ td->cur_entry = list_next_entry(td->cur_entry, list);
2467
2468 spin_unlock_bh(&q->lock);
2469+
2470+ if (is_mu && td->tx_pending)
2471+ mt76_worker_schedule(&phy->dev->tx_worker);
2472 }
2473
2474 static u32
developer15c355d2023-03-21 17:28:34 +08002475@@ -89,15 +132,31 @@ mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
developer6caa5e22022-06-16 13:33:13 +08002476 }
2477
2478 static void
2479-mt76_testmode_free_skb(struct mt76_phy *phy)
2480+mt76_testmode_free_skb(struct sk_buff **tx_skb)
2481+{
2482+ if (!(*tx_skb))
2483+ return;
2484+
2485+ dev_kfree_skb(*tx_skb);
2486+ *tx_skb = NULL;
2487+}
2488+
2489+static void
2490+mt76_testmode_free_skb_all(struct mt76_phy *phy)
2491 {
2492 struct mt76_testmode_data *td = &phy->test;
2493+ struct mt76_testmode_entry_data *ed = &td->ed;
2494+ struct mt76_wcid *wcid;
2495+
2496+ mt76_testmode_free_skb(&ed->tx_skb);
2497
2498- dev_kfree_skb(td->tx_skb);
2499- td->tx_skb = NULL;
2500+ mt76_tm_for_each_entry(phy, wcid, ed)
2501+ mt76_testmode_free_skb(&ed->tx_skb);
2502 }
2503
2504-int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
2505+static int
2506+mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len,
2507+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2508 {
2509 #define MT_TXP_MAX_LEN 4095
2510 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
developer15c355d2023-03-21 17:28:34 +08002511@@ -118,7 +177,8 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002512 nfrags = len / MT_TXP_MAX_LEN;
2513 head_len = nfrags ? MT_TXP_MAX_LEN : len;
2514
2515- if (len > IEEE80211_MAX_FRAME_LEN)
2516+ if (len > IEEE80211_MAX_FRAME_LEN ||
2517+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2518 fc |= IEEE80211_STYPE_QOS_DATA;
2519
2520 head = alloc_skb(head_len, GFP_KERNEL);
developer15c355d2023-03-21 17:28:34 +08002521@@ -127,9 +187,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002522
developer072c5612022-07-15 18:30:03 +08002523 hdr = __skb_put_zero(head, sizeof(*hdr));
developer6caa5e22022-06-16 13:33:13 +08002524 hdr->frame_control = cpu_to_le16(fc);
2525- memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
2526- memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
2527- memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
2528+ memcpy(hdr->addr1, addr[0], ETH_ALEN);
2529+ memcpy(hdr->addr2, addr[1], ETH_ALEN);
2530+ memcpy(hdr->addr3, addr[2], ETH_ALEN);
2531 skb_set_queue_mapping(head, IEEE80211_AC_BE);
developer072c5612022-07-15 18:30:03 +08002532 get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
2533 head_len - sizeof(*hdr));
developer15c355d2023-03-21 17:28:34 +08002534@@ -153,7 +213,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002535
2536 frag = alloc_skb(frag_len, GFP_KERNEL);
2537 if (!frag) {
2538- mt76_testmode_free_skb(phy);
2539+ mt76_testmode_free_skb(tx_skb);
2540 dev_kfree_skb(head);
2541 return -ENOMEM;
2542 }
developer15c355d2023-03-21 17:28:34 +08002543@@ -166,15 +226,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
developer6caa5e22022-06-16 13:33:13 +08002544 frag_tail = &(*frag_tail)->next;
2545 }
2546
2547- mt76_testmode_free_skb(phy);
2548- td->tx_skb = head;
2549+ mt76_testmode_free_skb(tx_skb);
2550+ *tx_skb = head;
2551
2552 return 0;
2553 }
2554-EXPORT_SYMBOL(mt76_testmode_alloc_skb);
2555
2556-static int
2557-mt76_testmode_tx_init(struct mt76_phy *phy)
2558+int mt76_testmode_init_skb(struct mt76_phy *phy, u32 len,
2559+ struct sk_buff **tx_skb, u8 (*addr)[ETH_ALEN])
2560 {
2561 struct mt76_testmode_data *td = &phy->test;
2562 struct ieee80211_tx_info *info;
developer15c355d2023-03-21 17:28:34 +08002563@@ -182,7 +241,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002564 u8 max_nss = hweight8(phy->antenna_mask);
2565 int ret;
2566
2567- ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
2568+ ret = mt76_testmode_alloc_skb(phy, len, tx_skb, addr);
2569 if (ret)
2570 return ret;
2571
developer15c355d2023-03-21 17:28:34 +08002572@@ -192,7 +251,7 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002573 if (td->tx_antenna_mask)
2574 max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
2575
2576- info = IEEE80211_SKB_CB(td->tx_skb);
2577+ info = IEEE80211_SKB_CB(*tx_skb);
2578 rate = &info->control.rates[0];
2579 rate->count = 1;
2580 rate->idx = td->tx_rate_idx;
developer15c355d2023-03-21 17:28:34 +08002581@@ -264,6 +323,25 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002582 out:
2583 return 0;
2584 }
2585+EXPORT_SYMBOL(mt76_testmode_init_skb);
2586+
2587+static int
2588+mt76_testmode_tx_init(struct mt76_phy *phy)
2589+{
2590+ struct mt76_testmode_entry_data *ed;
2591+ struct mt76_wcid *wcid;
2592+
2593+ mt76_tm_for_each_entry(phy, wcid, ed) {
2594+ int ret;
2595+
2596+ ret = mt76_testmode_init_skb(phy, ed->tx_mpdu_len,
2597+ &ed->tx_skb, ed->addr);
2598+ if (ret)
2599+ return ret;
2600+ }
2601+
2602+ return 0;
2603+}
2604
2605 static void
2606 mt76_testmode_tx_start(struct mt76_phy *phy)
developer15c355d2023-03-21 17:28:34 +08002607@@ -274,6 +352,14 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002608 td->tx_queued = 0;
2609 td->tx_done = 0;
2610 td->tx_pending = td->tx_count;
2611+ if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2612+ td->tx_pending = 1;
2613+ if (td->entry_num) {
2614+ td->tx_pending *= td->entry_num;
2615+ td->cur_entry = list_first_entry(&td->tm_entry_list,
2616+ struct mt76_wcid, list);
2617+ }
2618+
2619 mt76_worker_schedule(&dev->tx_worker);
2620 }
2621
developer15c355d2023-03-21 17:28:34 +08002622@@ -292,7 +378,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002623 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
2624 MT76_TM_TIMEOUT * HZ);
2625
2626- mt76_testmode_free_skb(phy);
2627+ mt76_testmode_free_skb_all(phy);
2628 }
2629
2630 static inline void
developer15c355d2023-03-21 17:28:34 +08002631@@ -323,6 +409,8 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
developer6caa5e22022-06-16 13:33:13 +08002632 memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
2633 memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
2634 memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
2635+
2636+ INIT_LIST_HEAD(&phy->test.tm_entry_list);
2637 }
2638
2639 static int
developer15c355d2023-03-21 17:28:34 +08002640@@ -332,8 +420,12 @@ __mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
developer6caa5e22022-06-16 13:33:13 +08002641 struct mt76_dev *dev = phy->dev;
2642 int err;
2643
2644- if (prev_state == MT76_TM_STATE_TX_FRAMES)
2645+ if (prev_state == MT76_TM_STATE_TX_FRAMES) {
2646+ /* MU needs to clean hwq for free done event */
2647+ if (phy->test.tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
2648+ dev->test_ops->set_state(phy, MT76_TM_STATE_IDLE);
2649 mt76_testmode_tx_stop(phy);
2650+ }
2651
2652 if (state == MT76_TM_STATE_TX_FRAMES) {
2653 err = mt76_testmode_tx_init(phy);
developer15c355d2023-03-21 17:28:34 +08002654@@ -403,6 +495,44 @@ mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
developer6caa5e22022-06-16 13:33:13 +08002655 return 0;
2656 }
2657
2658+static int
2659+mt76_testmode_set_eeprom(struct mt76_phy *phy, struct nlattr **tb)
2660+{
2661+ struct mt76_dev *dev = phy->dev;
2662+ u8 action, val[MT76_TM_EEPROM_BLOCK_SIZE];
2663+ u32 offset = 0;
2664+ int err = -EINVAL;
2665+
2666+ if (!dev->test_ops->set_eeprom)
2667+ return -EOPNOTSUPP;
2668+
2669+ if (mt76_tm_get_u8(tb[MT76_TM_ATTR_EEPROM_ACTION], &action,
2670+ 0, MT76_TM_EEPROM_ACTION_MAX))
2671+ goto out;
2672+
2673+ if (tb[MT76_TM_ATTR_EEPROM_OFFSET]) {
2674+ struct nlattr *cur;
2675+ int rem, idx = 0;
2676+
2677+ offset = nla_get_u32(tb[MT76_TM_ATTR_EEPROM_OFFSET]);
2678+ if (!!(offset % MT76_TM_EEPROM_BLOCK_SIZE) ||
2679+ !tb[MT76_TM_ATTR_EEPROM_VAL])
2680+ goto out;
2681+
2682+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_EEPROM_VAL], rem) {
2683+ if (nla_len(cur) != 1 || idx >= ARRAY_SIZE(val))
2684+ goto out;
2685+
2686+ val[idx++] = nla_get_u8(cur);
2687+ }
2688+ }
2689+
2690+ err = dev->test_ops->set_eeprom(phy, offset, val, action);
2691+
2692+out:
2693+ return err;
2694+}
2695+
2696 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2697 void *data, int len)
2698 {
developer15c355d2023-03-21 17:28:34 +08002699@@ -426,6 +556,11 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002700
2701 mutex_lock(&dev->mutex);
2702
2703+ if (tb[MT76_TM_ATTR_EEPROM_ACTION]) {
2704+ err = mt76_testmode_set_eeprom(phy, tb);
2705+ goto out;
2706+ }
2707+
2708 if (tb[MT76_TM_ATTR_RESET]) {
2709 mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
2710 memset(td, 0, sizeof(*td));
developer15c355d2023-03-21 17:28:34 +08002711@@ -452,7 +587,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002712 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
2713 &td->tx_duty_cycle, 0, 99) ||
2714 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
2715- &td->tx_power_control, 0, 1))
2716+ &td->tx_power_control, 0, 1) ||
2717+ mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &td->aid, 0, 16) ||
2718+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_ALLOC], &td->ru_alloc, 0, 0xff) ||
2719+ mt76_tm_get_u8(tb[MT76_TM_ATTR_RU_IDX], &td->ru_idx, 0, 68))
2720 goto out;
2721
2722 if (tb[MT76_TM_ATTR_TX_LENGTH]) {
developer15c355d2023-03-21 17:28:34 +08002723@@ -484,8 +622,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002724
2725 if (tb[MT76_TM_ATTR_TX_POWER]) {
2726 struct nlattr *cur;
2727- int idx = 0;
2728- int rem;
2729+ int rem, idx = 0;
2730
2731 nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
2732 if (nla_len(cur) != 1 ||
developer15c355d2023-03-21 17:28:34 +08002733@@ -505,11 +642,45 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
developer6caa5e22022-06-16 13:33:13 +08002734 if (nla_len(cur) != ETH_ALEN || idx >= 3)
2735 goto out;
2736
2737- memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
2738+ memcpy(td->addr[idx++], nla_data(cur), ETH_ALEN);
2739+ }
2740+ }
2741+
2742+ if (tb[MT76_TM_ATTR_CFG]) {
2743+ struct nlattr *cur;
2744+ int rem, idx = 0;
2745+
2746+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_CFG], rem) {
2747+ if (nla_len(cur) != 1 || idx >= 2)
2748+ goto out;
2749+
2750+ if (idx == 0)
2751+ td->cfg.type = nla_get_u8(cur);
2752+ else
2753+ td->cfg.enable = nla_get_u8(cur);
2754 idx++;
2755 }
2756 }
2757
2758+ if (tb[MT76_TM_ATTR_TXBF_ACT]) {
2759+ struct nlattr *cur;
2760+ int rem, idx = 0;
2761+
2762+ if (!tb[MT76_TM_ATTR_TXBF_PARAM] ||
2763+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TXBF_ACT], &td->txbf_act,
developer692ed9b2023-06-19 12:03:50 +08002764+ 0, MT76_TM_TXBF_ACT_MAX))
developer6caa5e22022-06-16 13:33:13 +08002765+ goto out;
2766+
2767+ memset(td->txbf_param, 0, sizeof(td->txbf_param));
2768+ nla_for_each_nested(cur, tb[MT76_TM_ATTR_TXBF_PARAM], rem) {
2769+ if (nla_len(cur) != 2 ||
2770+ idx >= ARRAY_SIZE(td->txbf_param))
2771+ goto out;
2772+
2773+ td->txbf_param[idx++] = nla_get_u16(cur);
2774+ }
2775+ }
2776+
2777 if (dev->test_ops->set_params) {
2778 err = dev->test_ops->set_params(phy, tb, state);
2779 if (err)
developer15c355d2023-03-21 17:28:34 +08002780@@ -574,6 +745,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002781 struct mt76_phy *phy = hw->priv;
2782 struct mt76_dev *dev = phy->dev;
2783 struct mt76_testmode_data *td = &phy->test;
2784+ struct mt76_testmode_entry_data *ed = &td->ed;
2785 struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
2786 int err = 0;
2787 void *a;
developer15c355d2023-03-21 17:28:34 +08002788@@ -606,6 +778,19 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002789 goto out;
2790 }
2791
2792+ if (tb[MT76_TM_ATTR_AID]) {
2793+ struct mt76_wcid *wcid;
2794+ u8 aid;
2795+
2796+ err = mt76_tm_get_u8(tb[MT76_TM_ATTR_AID], &aid, 1, 16);
2797+ if (err)
2798+ goto out;
2799+
2800+ mt76_tm_for_each_entry(phy, wcid, ed)
2801+ if (ed->aid == aid)
2802+ ed = mt76_testmode_entry_data(phy, wcid);
2803+ }
2804+
2805 mt76_testmode_init_defaults(phy);
2806
2807 err = -EMSGSIZE;
developer15c355d2023-03-21 17:28:34 +08002808@@ -618,12 +803,8 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
developer6caa5e22022-06-16 13:33:13 +08002809 goto out;
2810
2811 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
2812- nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
2813 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
2814- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
2815- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
2816 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
2817- nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
2818 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
2819 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
2820 nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
developer692ed9b2023-06-19 12:03:50 +08002821@@ -640,7 +821,16 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
2822 (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
2823 nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
2824 (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
2825- nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2826+ nla_put_u32(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
2827+ goto out;
2828+
developer6caa5e22022-06-16 13:33:13 +08002829+ if (nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, ed->tx_mpdu_len) ||
2830+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, ed->tx_rate_nss) ||
2831+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, ed->tx_rate_idx) ||
2832+ nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, ed->tx_rate_ldpc) ||
2833+ nla_put_u8(msg, MT76_TM_ATTR_AID, ed->aid) ||
2834+ nla_put_u8(msg, MT76_TM_ATTR_RU_ALLOC, ed->ru_alloc) ||
2835+ nla_put_u8(msg, MT76_TM_ATTR_RU_IDX, ed->ru_idx))
developer692ed9b2023-06-19 12:03:50 +08002836 goto out;
2837
developer6caa5e22022-06-16 13:33:13 +08002838 if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
developer6caa5e22022-06-16 13:33:13 +08002839diff --git a/testmode.h b/testmode.h
developer692ed9b2023-06-19 12:03:50 +08002840index 8961326..8c55fa0 100644
developer6caa5e22022-06-16 13:33:13 +08002841--- a/testmode.h
2842+++ b/testmode.h
2843@@ -6,6 +6,8 @@
2844 #define __MT76_TESTMODE_H
2845
2846 #define MT76_TM_TIMEOUT 10
2847+#define MT76_TM_MAX_ENTRY_NUM 16
2848+#define MT76_TM_EEPROM_BLOCK_SIZE 16
2849
2850 /**
2851 * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
2852@@ -47,6 +49,15 @@
2853 * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
2854 *
2855 * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested)
2856+ *
2857+ * @MT76_TM_ATTR_EEPROM_ACTION: eeprom setting actions
developer692ed9b2023-06-19 12:03:50 +08002858+ * (u8, see &enum mt76_testmode_eeprom_action)
developer6caa5e22022-06-16 13:33:13 +08002859+ * @MT76_TM_ATTR_EEPROM_OFFSET: offset of eeprom data block for writing (u32)
2860+ * @MT76_TM_ATTR_EEPROM_VAL: values for writing into a 16-byte data block
developer692ed9b2023-06-19 12:03:50 +08002861+ * (nested, u8 attrs)
developer6caa5e22022-06-16 13:33:13 +08002862+ *
2863+ * @MT76_TM_ATTR_CFG: rf feature configuration in testmode (nested, see &enum mt76_testmode_cfg)
2864+ *
2865 */
2866 enum mt76_testmode_attr {
2867 MT76_TM_ATTR_UNSPEC,
2868@@ -84,6 +95,17 @@ enum mt76_testmode_attr {
2869 MT76_TM_ATTR_DRV_DATA,
2870
2871 MT76_TM_ATTR_MAC_ADDRS,
2872+ MT76_TM_ATTR_AID,
2873+ MT76_TM_ATTR_RU_ALLOC,
2874+ MT76_TM_ATTR_RU_IDX,
2875+
2876+ MT76_TM_ATTR_EEPROM_ACTION,
2877+ MT76_TM_ATTR_EEPROM_OFFSET,
2878+ MT76_TM_ATTR_EEPROM_VAL,
2879+
2880+ MT76_TM_ATTR_CFG,
2881+ MT76_TM_ATTR_TXBF_ACT,
2882+ MT76_TM_ATTR_TXBF_PARAM,
2883
2884 /* keep last */
2885 NUM_MT76_TM_ATTRS,
2886@@ -198,4 +220,57 @@ enum mt76_testmode_tx_mode {
2887
2888 extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
2889
2890+/**
2891+ * enum mt76_testmode_eeprom_action - eeprom setting actions
2892+ *
2893+ * @MT76_TM_EEPROM_ACTION_UPDATE_DATA: update rf values to specific
developer692ed9b2023-06-19 12:03:50 +08002894+ * eeprom data block
developer6caa5e22022-06-16 13:33:13 +08002895+ * @MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE: send updated eeprom data to fw
2896+ * @MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE: write eeprom data back to efuse
2897+ */
2898+enum mt76_testmode_eeprom_action {
2899+ MT76_TM_EEPROM_ACTION_UPDATE_DATA,
2900+ MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE,
2901+ MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE,
2902+
2903+ /* keep last */
2904+ NUM_MT76_TM_EEPROM_ACTION,
2905+ MT76_TM_EEPROM_ACTION_MAX = NUM_MT76_TM_EEPROM_ACTION - 1,
2906+};
2907+
2908+/**
2909+ * enum mt76_testmode_cfg - rf feature configuration type (for MT76_TM_ATTR_CFG)
2910+ *
2911+ * @MT76_TM_CFG_TSSI: TSSI (transmit signal strength indicator) calibration
developer692ed9b2023-06-19 12:03:50 +08002912+ * @MT76_TM_CFG_DPD: DPD (digital pre-distortion) calibration
developer6caa5e22022-06-16 13:33:13 +08002913+ * @MT76_TM_CFG_RATE_POWER_OFFSET: per-rate tx power offset
2914+ * @MT76_TM_CFG_THERMAL_COMP: thermal compensation
2915+ */
2916+enum mt76_testmode_cfg {
2917+ MT76_TM_CFG_TSSI,
2918+ MT76_TM_CFG_DPD,
2919+ MT76_TM_CFG_RATE_POWER_OFFSET,
2920+ MT76_TM_CFG_THERMAL_COMP,
2921+
2922+ /* keep last */
2923+ NUM_MT76_TM_CFG,
2924+ MT76_TM_CFG_MAX = NUM_MT76_TM_CFG - 1,
2925+};
2926+
2927+enum mt76_testmode_txbf_act {
2928+ MT76_TM_TXBF_ACT_INIT,
2929+ MT76_TM_TXBF_ACT_UPDATE_CH,
2930+ MT76_TM_TXBF_ACT_PHASE_COMP,
2931+ MT76_TM_TXBF_ACT_TX_PREP,
2932+ MT76_TM_TXBF_ACT_IBF_PROF_UPDATE,
2933+ MT76_TM_TXBF_ACT_EBF_PROF_UPDATE,
2934+ MT76_TM_TXBF_ACT_PHASE_CAL,
2935+ MT76_TM_TXBF_ACT_PROF_UPDATE_ALL,
2936+ MT76_TM_TXBF_ACT_E2P_UPDATE,
2937+
2938+ /* keep last */
2939+ NUM_MT76_TM_TXBF_ACT,
2940+ MT76_TM_TXBF_ACT_MAX = NUM_MT76_TM_TXBF_ACT - 1,
2941+};
2942+
2943 #endif
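
For orientation, a minimal userspace sketch of how the eeprom attributes above could be filled in with libnl; the helper name, include path and the surrounding nl80211/NL80211_ATTR_TESTDATA plumbing are assumptions and not part of this patch, only the attribute layout mirrors what mt76_testmode_set_eeprom() expects:

#include <errno.h>
#include <stdint.h>
#include <netlink/attr.h>
#include <netlink/msg.h>
#include "testmode.h"	/* assumed include path for MT76_TM_ATTR_* */

/* Hypothetical helper: fill a testdata message that updates one 16-byte
 * eeprom block, matching the alignment and length checks done by
 * mt76_testmode_set_eeprom() in the driver.
 */
static int tm_eeprom_update_block(struct nl_msg *msg, uint32_t offset,
				  const uint8_t *block)
{
	struct nlattr *vals;
	int i;

	/* the driver rejects offsets that are not block aligned */
	if (offset % MT76_TM_EEPROM_BLOCK_SIZE)
		return -EINVAL;

	nla_put_u8(msg, MT76_TM_ATTR_EEPROM_ACTION,
		   MT76_TM_EEPROM_ACTION_UPDATE_DATA);
	nla_put_u32(msg, MT76_TM_ATTR_EEPROM_OFFSET, offset);

	vals = nla_nest_start(msg, MT76_TM_ATTR_EEPROM_VAL);
	if (!vals)
		return -ENOMEM;
	/* one u8 attribute per byte; the driver only checks nla_len == 1 */
	for (i = 0; i < MT76_TM_EEPROM_BLOCK_SIZE; i++)
		nla_put_u8(msg, i + 1, block[i]);
	nla_nest_end(msg, vals);

	return 0;
}

A follow-up message with MT76_TM_EEPROM_ACTION_UPDATE_BUFFER_MODE pushes the updated buffer to firmware, and MT76_TM_EEPROM_ACTION_WRITE_TO_EFUSE writes it back to efuse, subject to the chip-id guard in mt7915_tm_write_back_to_efuse().
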
2944diff --git a/tools/fields.c b/tools/fields.c
developerc9333e12023-04-06 18:07:42 +08002945index e3f6908..7e564a3 100644
developer6caa5e22022-06-16 13:33:13 +08002946--- a/tools/fields.c
2947+++ b/tools/fields.c
2948@@ -10,6 +10,7 @@ static const char * const testmode_state[] = {
2949 [MT76_TM_STATE_IDLE] = "idle",
2950 [MT76_TM_STATE_TX_FRAMES] = "tx_frames",
2951 [MT76_TM_STATE_RX_FRAMES] = "rx_frames",
2952+ [MT76_TM_STATE_TX_CONT] = "tx_cont",
2953 };
2954
2955 static const char * const testmode_tx_mode[] = {
developer15c355d2023-03-21 17:28:34 +08002956@@ -86,12 +87,12 @@ static void print_s32(const struct tm_field *field, struct nlattr *attr)
2957
2958 static void print_u32(const struct tm_field *field, struct nlattr *attr)
2959 {
2960- printf("%d", nla_get_u32(attr));
2961+ printf("%u", nla_get_u32(attr));
2962 }
2963
2964 static void print_u64(const struct tm_field *field, struct nlattr *attr)
2965 {
2966- printf("%lld", (unsigned long long)nla_get_u64(attr));
2967+ printf("%llu", (unsigned long long)nla_get_u64(attr));
2968 }
2969
2970 static bool parse_flag(const struct tm_field *field, int idx,
developer6caa5e22022-06-16 13:33:13 +08002971@@ -201,6 +202,63 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
2972 printf("%srx_per=%.02f%%\n", prefix, 100 * failed / total);
2973 }
2974
2975+static bool parse_mac(const struct tm_field *field, int idx,
2976+ struct nl_msg *msg, const char *val)
2977+{
2978+#define ETH_ALEN 6
2979+ bool ret = true;
2980+ char *str, *cur, *ap;
2981+ void *a;
2982+
2983+ ap = str = strdup(val);
2984+
2985+ a = nla_nest_start(msg, idx);
2986+
2987+ idx = 0;
2988+ while ((cur = strsep(&ap, ",")) != NULL) {
2989+ unsigned char addr[ETH_ALEN];
2990+ char *val, *tmp = cur;
2991+ int i = 0;
2992+
2993+ while ((val = strsep(&tmp, ":")) != NULL) {
2994+ if (i >= ETH_ALEN)
2995+ break;
2996+
2997+ addr[i++] = strtoul(val, NULL, 16);
2998+ }
2999+
3000+ nla_put(msg, idx, ETH_ALEN, addr);
3001+
3002+ idx++;
3003+ }
3004+
3005+ nla_nest_end(msg, a);
3006+
3007+ free(str);
3008+
3009+ return ret;
3010+}
3011+
3012+static void print_mac(const struct tm_field *field, struct nlattr *attr)
3013+{
3014+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
3015+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
3016+ unsigned char addr[3][6];
3017+ struct nlattr *cur;
3018+ int idx = 0;
3019+ int rem;
3020+
3021+ nla_for_each_nested(cur, attr, rem) {
3022+		if (nla_len(cur) != 6 || idx >= 3)
3023+			continue;
3024+ memcpy(addr[idx++], nla_data(cur), 6);
3025+ }
3026+
3027+ printf("" MACSTR "," MACSTR "," MACSTR "",
3028+ MAC2STR(addr[0]), MAC2STR(addr[1]), MAC2STR(addr[2]));
3029+
3030+ return;
3031+}
3032
3033 #define FIELD_GENERIC(_field, _name, ...) \
3034 [FIELD_NAME(_field)] = { \
3035@@ -250,6 +308,13 @@ static void print_extra_stats(const struct tm_field *field, struct nlattr **tb)
3036 ##__VA_ARGS__ \
3037 )
3038
3039+#define FIELD_MAC(_field, _name) \
3040+ [FIELD_NAME(_field)] = { \
3041+ .name = _name, \
3042+ .parse = parse_mac, \
3043+ .print = print_mac \
3044+ }
3045+
3046 #define FIELD_NAME(_field) MT76_TM_RX_ATTR_##_field
3047 static const struct tm_field rx_fields[NUM_MT76_TM_RX_ATTRS] = {
3048 FIELD_RO(s32, FREQ_OFFSET, "freq_offset"),
3049@@ -300,10 +365,18 @@ static const struct tm_field testdata_fields[NUM_MT76_TM_ATTRS] = {
3050 FIELD(u8, TX_RATE_LDPC, "tx_rate_ldpc"),
3051 FIELD(u8, TX_RATE_STBC, "tx_rate_stbc"),
3052 FIELD(u8, TX_LTF, "tx_ltf"),
3053+ FIELD(u8, TX_DUTY_CYCLE, "tx_duty_cycle"),
3054+ FIELD(u32, TX_IPG, "tx_ipg"),
3055+ FIELD(u32, TX_TIME, "tx_time"),
3056 FIELD(u8, TX_POWER_CONTROL, "tx_power_control"),
3057 FIELD_ARRAY(u8, TX_POWER, "tx_power"),
3058 FIELD(u8, TX_ANTENNA, "tx_antenna"),
3059+ FIELD(u8, TX_SPE_IDX, "tx_spe_idx"),
3060 FIELD(u32, FREQ_OFFSET, "freq_offset"),
3061+ FIELD(u8, AID, "aid"),
3062+ FIELD(u8, RU_ALLOC, "ru_alloc"),
3063+ FIELD(u8, RU_IDX, "ru_idx"),
3064+ FIELD_MAC(MAC_ADDRS, "mac_addrs"),
3065 FIELD_NESTED_RO(STATS, stats, "",
3066 .print_extra = print_extra_stats),
3067 };
3068@@ -322,9 +395,16 @@ static struct nla_policy testdata_policy[NUM_MT76_TM_ATTRS] = {
3069 [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
3070 [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
3071 [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
3072+ [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
3073+ [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
3074+ [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
3075 [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
3076 [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
3077+ [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
3078 [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
3079+ [MT76_TM_ATTR_AID] = { .type = NLA_U8 },
3080+ [MT76_TM_ATTR_RU_ALLOC] = { .type = NLA_U8 },
3081+ [MT76_TM_ATTR_RU_IDX] = { .type = NLA_U8 },
3082 [MT76_TM_ATTR_STATS] = { .type = NLA_NESTED },
3083 };
3084
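
Usage note for the new mac_addrs field (addresses below are made up): parse_mac() splits the value on ',' and each address on ':' into octets, and the driver accepts at most three addresses, so a setting such as mac_addrs=00:11:22:33:44:01,00:11:22:33:44:02,00:11:22:33:44:03 populates addr1/addr2/addr3 of the generated test frame header (DA, SA and BSSID for the plain data frames built by mt76_testmode_alloc_skb()).
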
3085diff --git a/tx.c b/tx.c
developer7af0f762023-05-22 15:16:16 +08003086index 72b3ec7..94f0d82 100644
developer6caa5e22022-06-16 13:33:13 +08003087--- a/tx.c
3088+++ b/tx.c
developer7af0f762023-05-22 15:16:16 +08003089@@ -252,8 +252,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
developer6caa5e22022-06-16 13:33:13 +08003090 if (mt76_is_testmode_skb(dev, skb, &hw)) {
3091 struct mt76_phy *phy = hw->priv;
3092
3093- if (skb == phy->test.tx_skb)
3094- phy->test.tx_done++;
3095+ phy->test.tx_done++;
3096 if (phy->test.tx_queued == phy->test.tx_done)
3097 wake_up(&dev->tx_wait);
3098
3099--
developer7af0f762023-05-22 15:16:16 +080031002.18.0
developer6caa5e22022-06-16 13:33:13 +08003101