From 19e0036562d574c6ffe6a47790dbfa953b35050c Mon Sep 17 00:00:00 2001
From: Shayne Chen <shayne.chen@mediatek.com>
Date: Tue, 11 May 2021 15:17:31 +0800
Subject: [PATCH 1109/1112] mt76: testmode: add support to queue skb of
 multiple stations

Rework the queue skb flow to support sending packets for multiple
virtual stations.

Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
---
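Note (illustrative only, not part of the change): below is a minimal
user-space sketch of the per-station scheduling that the reworked
mt76_testmode_tx_pending() performs. It assumes a non-MU rate mode, that
every mt76_testmode_queue_tx() call queues its full limit (no queue
backpressure), that bit n of tm_sta_mask maps to AID n (as the ffs()
arithmetic in the hunks below suggests), and it uses made-up example
values for tm_sta_mask and tx_count; hweight16() is open-coded in place
of the kernel helper.

/*
 * Illustrative sketch only -- simulates how the reworked
 * mt76_testmode_tx_pending() splits td->tx_count frames across the AIDs
 * set in td->tm_sta_mask, assuming every batch is queued in full.
 *
 * Build: gcc -o tm_sched_sketch tm_sched_sketch.c
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* open-coded stand-in for the kernel's hweight16() */
static unsigned int hweight16(unsigned int mask)
{
	unsigned int w = 0;

	for (; mask; mask &= mask - 1)
		w++;
	return w;
}

int main(void)
{
	unsigned int tm_sta_mask = 0x0016;	/* example mask: AIDs 2, 3 and 5 */
	unsigned int tx_count = 9000;		/* example testmode tx_count */
	unsigned int tx_pending = tx_count;
	unsigned int per_sta_cnt = tx_count / hweight16(tm_sta_mask);
	unsigned int cur_aid = ffs(tm_sta_mask);	/* start from the lowest AID */

	while (1) {
		/* a non-divisible tx_count leaves its remainder in the first batch */
		unsigned int limit = tx_pending % per_sta_cnt;

		if (!limit)
			limit = per_sta_cnt;

		/* the driver would call mt76_testmode_queue_tx() here */
		printf("AID %u: queue %u frame(s)\n", cur_aid, limit);
		tx_pending -= limit;

		/* advance to the next AID once the current share is exhausted */
		if (tx_pending && !(tx_pending % per_sta_cnt))
			cur_aid = ffs(tm_sta_mask >> cur_aid) + cur_aid;
		else
			break;
	}

	return 0;
}

With the example values (tm_sta_mask = 0x16, i.e. AIDs 2/3/5, and
tx_count = 9000) it prints one 3000-frame batch per AID, matching
per_sta_cnt = tx_count / hweight16(tm_sta_mask). In the HE_MU case the
patch instead keeps per_sta_cnt at 1 and sets tx_pending to the station
count, so each AID queues a single frame and the real tx count is handed
to the firmware via an MCU command, per the comment in
mt76_testmode_tx_start().
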
 drivers/net/wireless/mediatek/mt76/mt76.h     |  1 +
 drivers/net/wireless/mediatek/mt76/testmode.c | 70 ++++++++++++++++---
 2 files changed, 63 insertions(+), 8 deletions(-)

diff --git a/mt76.h b/mt76.h
index b5f1367..4b502c6 100644
--- a/mt76.h
+++ b/mt76.h
@@ -642,6 +642,7 @@ struct mt76_testmode_data {
 	u8 off_ch_scan_path;
 
 	struct mt76_wcid *tm_wcid[MT76_TM_MAX_STA_NUM + 1];
+	u8 cur_aid;
 	u16 tm_sta_mask;
 	union {
 		struct mt76_testmode_sta_data sd;
diff --git a/testmode.c b/testmode.c
index 0f93338..9da490c 100644
--- a/testmode.c
+++ b/testmode.c
@@ -25,18 +25,18 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
 };
 EXPORT_SYMBOL_GPL(mt76_tm_policy);
 
-void mt76_testmode_tx_pending(struct mt76_phy *phy)
+static u16
+mt76_testmode_queue_tx(struct mt76_phy *phy, struct mt76_wcid *wcid,
+		       struct sk_buff *skb, u32 limit)
 {
 	struct mt76_testmode_data *td = &phy->test;
 	struct mt76_dev *dev = phy->dev;
-	struct mt76_wcid *wcid = &dev->global_wcid;
-	struct sk_buff *skb = td->tx_skb;
 	struct mt76_queue *q;
-	u16 tx_queued_limit;
+	u16 tx_queued_limit, count = 0;
 	int qid;
 
-	if (!skb || !td->tx_pending)
-		return;
+	if (!skb)
+		return 0;
 
 	qid = skb_get_queue_mapping(skb);
 	q = phy->q_tx[qid];
@@ -45,7 +45,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
 
 	spin_lock_bh(&q->lock);
 
-	while (td->tx_pending > 0 &&
+	while (count < limit &&
 	       td->tx_queued - td->tx_done < tx_queued_limit &&
 	       q->queued < q->ndesc / 2) {
 		int ret;
@@ -55,13 +55,56 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
 		if (ret < 0)
 			break;
 
-		td->tx_pending--;
 		td->tx_queued++;
+		count++;
 	}
 
 	dev->queue_ops->kick(dev, q);
 
 	spin_unlock_bh(&q->lock);
+
+	return count;
+}
+
+void mt76_testmode_tx_pending(struct mt76_phy *phy)
+{
+	struct mt76_testmode_data *td = &phy->test;
+	u16 count;
+
+	if (!td->tx_pending)
+		return;
+
+	if (!mt76_testmode_has_sta(phy)) {
+		count = mt76_testmode_queue_tx(phy, &phy->dev->global_wcid,
+					       td->tx_skb, td->tx_pending);
+		td->tx_pending -= count;
+
+		return;
+	}
+
+	while (true) {
+		struct mt76_testmode_sta *tm_sta;
+		struct mt76_wcid *wcid;
+		u32 limit, per_sta_cnt = 1;
+
+		if (td->tx_rate_mode != MT76_TM_TX_MODE_HE_MU)
+			per_sta_cnt = td->tx_count / hweight16(phy->test.tm_sta_mask);
+
+		limit = td->tx_pending % per_sta_cnt;
+		if (limit == 0)
+			limit = per_sta_cnt;
+
+		tm_sta = mt76_testmode_aid_get_sta(phy, td->cur_aid);
+		wcid = td->tm_wcid[td->cur_aid];
+		count = mt76_testmode_queue_tx(phy, wcid, tm_sta->tx_skb, limit);
+
+		td->tx_pending -= count;
+
+		if (td->tx_pending && (td->tx_pending % per_sta_cnt == 0))
+			td->cur_aid = ffs(td->tm_sta_mask >> td->cur_aid) + td->cur_aid;
+		else
+			break;
+	}
 }
 
 static u32
@@ -318,6 +361,17 @@ mt76_testmode_tx_start(struct mt76_phy *phy)
 	td->tx_queued = 0;
 	td->tx_done = 0;
 	td->tx_pending = td->tx_count;
+
+	if (mt76_testmode_has_sta(phy)) {
+		td->cur_aid = ffs(td->tm_sta_mask);
+
+		/* The actual tx count of MU packets will be passed to
+		 * the FW by an MCU command in testmode.
+		 */
+		if (td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU)
+			td->tx_pending = hweight16(phy->test.tm_sta_mask);
+	}
+
 	mt76_worker_schedule(&dev->tx_worker);
 }
 
-- 
2.25.1
