From 20d498a3d24a4c20a421bc6bc19a058620aa86ec Mon Sep 17 00:00:00 2001
From: Peter Chiu <chui-hao.chiu@mediatek.com>
Date: Mon, 23 Oct 2023 10:26:01 +0800
Subject: [PATCH 2015/2015] Revert "wifi: mt76: fix race condition related to
 checking tx queue fill status"

This reverts commit f1e1e67d97d1e9a8bb01b59ab20c45ebc985a958.
---
 mac80211.c |  27 --------------
 mt76.h     |   5 ---
 tx.c       | 108 ++++++++++----------------------------------------------
 3 files changed, 20 insertions(+), 120 deletions(-)

diff --git a/mac80211.c b/mac80211.c
index 37e6a627..923c6a31 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -427,9 +427,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
 struct mt76_dev *dev = phy->dev;
 struct wiphy *wiphy = hw->wiphy;

- INIT_LIST_HEAD(&phy->tx_list);
- spin_lock_init(&phy->tx_lock);
-
 SET_IEEE80211_DEV(hw, dev->dev);
 SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

@@ -662,7 +659,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 int ret;

 dev_set_drvdata(dev->dev, dev);
- mt76_wcid_init(&dev->global_wcid);
 ret = mt76_phy_init(phy, hw);
 if (ret)
 return ret;
@@ -718,7 +714,6 @@ void mt76_unregister_device(struct mt76_dev *dev)
 if (IS_ENABLED(CONFIG_MT76_LEDS))
 mt76_led_cleanup(&dev->phy);
 mt76_tx_status_check(dev, true);
- mt76_wcid_cleanup(dev, &dev->global_wcid);
 ieee80211_unregister_hw(hw);
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_device);
@@ -1460,9 +1455,6 @@ EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

 void mt76_wcid_init(struct mt76_wcid *wcid)
 {
- INIT_LIST_HEAD(&wcid->tx_list);
- skb_queue_head_init(&wcid->tx_pending);
-
 INIT_LIST_HEAD(&wcid->list);
 idr_init(&wcid->pktid);
 }
@@ -1470,32 +1462,13 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);

 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
 {
- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
- struct ieee80211_hw *hw;
 struct sk_buff_head list;
- struct sk_buff *skb;

 mt76_tx_status_lock(dev, &list);
 mt76_tx_status_skb_get(dev, wcid, -1, &list);
 mt76_tx_status_unlock(dev, &list);

 idr_destroy(&wcid->pktid);
-
- spin_lock_bh(&phy->tx_lock);
-
- if (!list_empty(&wcid->tx_list))
- list_del_init(&wcid->tx_list);
-
- spin_lock(&wcid->tx_pending.lock);
- skb_queue_splice_tail_init(&wcid->tx_pending, &list);
- spin_unlock(&wcid->tx_pending.lock);
-
- spin_unlock_bh(&phy->tx_lock);
-
- while ((skb = __skb_dequeue(&list)) != NULL) {
- hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_free_txskb(hw, skb);
- }
 }
 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);

diff --git a/mt76.h b/mt76.h
index 3ff348f2..5eb571ef 100644
--- a/mt76.h
+++ b/mt76.h
@@ -354,9 +354,6 @@ struct mt76_wcid {
 u32 tx_info;
 bool sw_iv;

- struct list_head tx_list;
- struct sk_buff_head tx_pending;
-
 struct list_head list;
 struct idr pktid;

@@ -808,8 +805,6 @@ struct mt76_phy {
 unsigned long state;
 u8 band_idx;

- spinlock_t tx_lock;
- struct list_head tx_list;
 struct mt76_queue *q_tx[__MT_TXQ_MAX];

 struct cfg80211_chan_def chandef;
diff --git a/tx.c b/tx.c
index 23a1e4e2..9dfc2890 100644
--- a/tx.c
+++ b/tx.c
@@ -322,32 +322,40 @@ void
 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 struct mt76_wcid *wcid, struct sk_buff *skb)
 {
+ struct mt76_dev *dev = phy->dev;
 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mt76_queue *q;
+ int qid = skb_get_queue_mapping(skb);

 if (mt76_testmode_enabled(phy)) {
 ieee80211_free_txskb(phy->hw, skb);
 return;
 }

- if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
- skb_set_queue_mapping(skb, MT_TXQ_BE);
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+ !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(skb)) {
+ qid = MT_TXQ_PSD;
+ }

 if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
 ieee80211_get_tx_rates(info->control.vif, sta, skb,
 info->control.rates, 1);

 info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
+ q = phy->q_tx[qid];

- spin_lock_bh(&wcid->tx_pending.lock);
- __skb_queue_tail(&wcid->tx_pending, skb);
- spin_unlock_bh(&wcid->tx_pending.lock);
-
- spin_lock_bh(&phy->tx_lock);
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
- spin_unlock_bh(&phy->tx_lock);
-
- mt76_worker_schedule(&phy->dev->tx_worker);
+ spin_lock_bh(&q->lock);
+ __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+ dev->queue_ops->kick(dev, q);
+ spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);

@@ -578,86 +586,10 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);

-static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
-{
- struct mt76_dev *dev = phy->dev;
- struct ieee80211_sta *sta;
- struct mt76_queue *q;
- struct sk_buff *skb;
- int ret = 0;
-
- spin_lock(&wcid->tx_pending.lock);
- while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int qid = skb_get_queue_mapping(skb);
-
- if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
- !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
- !ieee80211_is_data(hdr->frame_control) &&
- !ieee80211_is_bufferable_mmpdu(skb))
- qid = MT_TXQ_PSD;
-
- q = phy->q_tx[qid];
- if (mt76_txq_stopped(q)) {
- ret = -1;
- break;
- }
-
- __skb_unlink(skb, &wcid->tx_pending);
- spin_unlock(&wcid->tx_pending.lock);
-
- sta = wcid_to_sta(wcid);
- spin_lock(&q->lock);
- __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
- dev->queue_ops->kick(dev, q);
- spin_unlock(&q->lock);
-
- spin_lock(&wcid->tx_pending.lock);
- }
- spin_unlock(&wcid->tx_pending.lock);
-
- return ret;
-}
-
-static void mt76_txq_schedule_pending(struct mt76_phy *phy)
-{
- if (list_empty(&phy->tx_list))
- return;
-
- local_bh_disable();
- rcu_read_lock();
-
- spin_lock(&phy->tx_lock);
- while (!list_empty(&phy->tx_list)) {
- struct mt76_wcid *wcid = NULL;
- int ret;
-
- wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
- list_del_init(&wcid->tx_list);
-
- spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid);
- spin_lock(&phy->tx_lock);
-
- if (ret) {
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
- break;
- }
- }
- spin_unlock(&phy->tx_lock);
-
- rcu_read_unlock();
- local_bh_enable();
-}
-
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
 int i;

- mt76_txq_schedule_pending(phy);
 for (i = 0; i <= MT_TXQ_BK; i++)
 mt76_txq_schedule(phy, i);
 }
--
2.18.0
