From 32681b271d223bd7646372cae382c11d8784797b Mon Sep 17 00:00:00 2001
From: Peter Chiu <chui-hao.chiu@mediatek.com>
Date: Mon, 23 Oct 2023 10:26:01 +0800
Subject: [PATCH 74/76] Revert "wifi: mt76: fix race condition related to
 checking tx queue fill status"

This reverts commit f1e1e67d97d1e9a8bb01b59ab20c45ebc985a958.
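
Reverting restores the previous transmit path: mt76_tx() resolves the
target hardware queue itself (including the MT_TXQ_PSD override for
non-bufferable management frames on MT_DRV_HW_MGMT_TXQ devices) and
enqueues and kicks the frame directly under the queue lock, instead of
deferring it to the per-wcid tx_pending list that the tx worker drained
via mt76_txq_schedule_pending(). The per-phy tx_list/tx_lock and
per-wcid tx_list/tx_pending bookkeeping are removed along with it.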
---
 mac80211.c |  27 --------------
 mt76.h     |   5 ---
 tx.c       | 108 ++++++++++-------------------------------------------
 3 files changed, 20 insertions(+), 120 deletions(-)

diff --git a/mac80211.c b/mac80211.c
index 09c9eb2..5e01353 100644
--- a/mac80211.c
+++ b/mac80211.c
@@ -438,9 +438,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
 	struct mt76_dev *dev = phy->dev;
 	struct wiphy *wiphy = hw->wiphy;
 
-	INIT_LIST_HEAD(&phy->tx_list);
-	spin_lock_init(&phy->tx_lock);
-
 	SET_IEEE80211_DEV(hw, dev->dev);
 	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
 
@@ -673,7 +670,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 	int ret;
 
 	dev_set_drvdata(dev->dev, dev);
-	mt76_wcid_init(&dev->global_wcid);
 	ret = mt76_phy_init(phy, hw);
 	if (ret)
 		return ret;
@@ -729,7 +725,6 @@ void mt76_unregister_device(struct mt76_dev *dev)
 	if (IS_ENABLED(CONFIG_MT76_LEDS))
 		mt76_led_cleanup(&dev->phy);
 	mt76_tx_status_check(dev, true);
-	mt76_wcid_cleanup(dev, &dev->global_wcid);
 	ieee80211_unregister_hw(hw);
 }
 EXPORT_SYMBOL_GPL(mt76_unregister_device);
@@ -1477,9 +1472,6 @@ EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
 
 void mt76_wcid_init(struct mt76_wcid *wcid)
 {
-	INIT_LIST_HEAD(&wcid->tx_list);
-	skb_queue_head_init(&wcid->tx_pending);
-
 	INIT_LIST_HEAD(&wcid->list);
 	idr_init(&wcid->pktid);
 }
@@ -1487,32 +1479,13 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
 
 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
 {
-	struct mt76_phy *phy = dev->phys[wcid->phy_idx];
-	struct ieee80211_hw *hw;
 	struct sk_buff_head list;
-	struct sk_buff *skb;
 
 	mt76_tx_status_lock(dev, &list);
 	mt76_tx_status_skb_get(dev, wcid, -1, &list);
 	mt76_tx_status_unlock(dev, &list);
 
 	idr_destroy(&wcid->pktid);
-
-	spin_lock_bh(&phy->tx_lock);
-
-	if (!list_empty(&wcid->tx_list))
-		list_del_init(&wcid->tx_list);
-
-	spin_lock(&wcid->tx_pending.lock);
-	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
-	spin_unlock(&wcid->tx_pending.lock);
-
-	spin_unlock_bh(&phy->tx_lock);
-
-	while ((skb = __skb_dequeue(&list)) != NULL) {
-		hw = mt76_tx_status_get_hw(dev, skb);
-		ieee80211_free_txskb(hw, skb);
-	}
 }
 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
 
diff --git a/mt76.h b/mt76.h
index 699d84e..70801f7 100644
--- a/mt76.h
+++ b/mt76.h
@@ -380,9 +380,6 @@ struct mt76_wcid {
 	u32 tx_info;
 	bool sw_iv;
 
-	struct list_head tx_list;
-	struct sk_buff_head tx_pending;
-
 	struct list_head list;
 	struct idr pktid;
 
@@ -846,8 +843,6 @@ struct mt76_phy {
 	unsigned long state;
 	u8 band_idx;
 
-	spinlock_t tx_lock;
-	struct list_head tx_list;
 	struct mt76_queue *q_tx[__MT_TXQ_MAX];
 
 	struct cfg80211_chan_def chandef;
diff --git a/tx.c b/tx.c
index 96f9009..c88fb29 100644
--- a/tx.c
+++ b/tx.c
@@ -328,32 +328,40 @@ void
 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	struct mt76_wcid *wcid, struct sk_buff *skb)
 {
+	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct mt76_queue *q;
+	int qid = skb_get_queue_mapping(skb);
 
 	if (mt76_testmode_enabled(phy)) {
 		ieee80211_free_txskb(phy->hw, skb);
 		return;
 	}
 
-	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
-		skb_set_queue_mapping(skb, MT_TXQ_BE);
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+	    !ieee80211_is_data(hdr->frame_control) &&
+	    !ieee80211_is_bufferable_mmpdu(skb)) {
+		qid = MT_TXQ_PSD;
+	}
 
 	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
 
 	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
+	q = phy->q_tx[qid];
 
-	spin_lock_bh(&wcid->tx_pending.lock);
-	__skb_queue_tail(&wcid->tx_pending, skb);
-	spin_unlock_bh(&wcid->tx_pending.lock);
-
-	spin_lock_bh(&phy->tx_lock);
-	if (list_empty(&wcid->tx_list))
-		list_add_tail(&wcid->tx_list, &phy->tx_list);
-	spin_unlock_bh(&phy->tx_lock);
-
-	mt76_worker_schedule(&phy->dev->tx_worker);
+	spin_lock_bh(&q->lock);
+	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+	dev->queue_ops->kick(dev, q);
+	spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
 
@@ -584,86 +592,10 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
 
-static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
-{
-	struct mt76_dev *dev = phy->dev;
-	struct ieee80211_sta *sta;
-	struct mt76_queue *q;
-	struct sk_buff *skb;
-	int ret = 0;
-
-	spin_lock(&wcid->tx_pending.lock);
-	while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-		int qid = skb_get_queue_mapping(skb);
-
-		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
-		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
-		    !ieee80211_is_data(hdr->frame_control) &&
-		    !ieee80211_is_bufferable_mmpdu(skb))
-			qid = MT_TXQ_PSD;
-
-		q = phy->q_tx[qid];
-		if (mt76_txq_stopped(q)) {
-			ret = -1;
-			break;
-		}
-
-		__skb_unlink(skb, &wcid->tx_pending);
-		spin_unlock(&wcid->tx_pending.lock);
-
-		sta = wcid_to_sta(wcid);
-		spin_lock(&q->lock);
-		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
-		dev->queue_ops->kick(dev, q);
-		spin_unlock(&q->lock);
-
-		spin_lock(&wcid->tx_pending.lock);
-	}
-	spin_unlock(&wcid->tx_pending.lock);
-
-	return ret;
-}
-
-static void mt76_txq_schedule_pending(struct mt76_phy *phy)
-{
-	if (list_empty(&phy->tx_list))
-		return;
-
-	local_bh_disable();
-	rcu_read_lock();
-
-	spin_lock(&phy->tx_lock);
-	while (!list_empty(&phy->tx_list)) {
-		struct mt76_wcid *wcid = NULL;
-		int ret;
-
-		wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
-		list_del_init(&wcid->tx_list);
-
-		spin_unlock(&phy->tx_lock);
-		ret = mt76_txq_schedule_pending_wcid(phy, wcid);
-		spin_lock(&phy->tx_lock);
-
-		if (ret) {
-			if (list_empty(&wcid->tx_list))
-				list_add_tail(&wcid->tx_list, &phy->tx_list);
-			break;
-		}
-	}
-	spin_unlock(&phy->tx_lock);
-
-	rcu_read_unlock();
-	local_bh_enable();
-}
-
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
 	int i;
 
-	mt76_txq_schedule_pending(phy);
 	for (i = 0; i <= MT_TXQ_BK; i++)
 		mt76_txq_schedule(phy, i);
 }
-- 
2.18.0
