// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
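/* Keep track of the A-MPDU starting sequence number for the frame's TID while
 * aggregation is active; mt76_txq_schedule_list() uses it when a BlockAckReq
 * needs to be sent after the queues were stopped.
 */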
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
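/* TX status reporting is protected by dev->status_lock together with a local
 * skb list: completed frames are collected into the list while the lock is
 * held, and mt76_tx_status_unlock() then reports them to mac80211 via
 * ieee80211_tx_status_ext() after the lock has been dropped.
 */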
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);

			if (status.sta)
				status.rate = &wcid->rate;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
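/* Allocate a packet ID for a frame that needs a TX status report and store the
 * skb in the wcid's pktid IDR. Frames that do not need status reporting get
 * MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB instead of an IDR entry.
 */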
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
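/* Fetch (and remove) the skb registered under a packet ID once its TX status
 * arrives. While scanning, entries whose DMA completed more than
 * MT_TX_STATUS_SKB_TIMEOUT ago are timed out and completed as failed.
 * Must be called with dev->status_lock held.
 */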
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}
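/* DMA completion handler: account for non-AQL packets, free testmode frames,
 * report untracked frames (no packet ID) straight to mac80211, and mark
 * tracked frames as DMA done so the TX status path can complete them later.
 */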
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}
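/* Transmit a single frame outside the TXQ scheduler path, typically from a
 * driver's mac80211 .tx callback: pick the hardware queue, fill in rate info
 * if the wcid has none set, queue the frame and kick the queue.
 */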
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}
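/* Intended as the mac80211 .release_buffered_frames handler: dequeue up to
 * @nframes buffered frames for the given TIDs, send all but the last with the
 * more-data bit set, mark the last one with EOSP and kick the PSD queue.
 */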
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}
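/* Pull a burst of frames from one mac80211 TXQ and queue them to the hardware
 * queue, stopping when the queue fills up, the station exceeds the non-AQL
 * packet limit, or the TXQ runs empty.
 */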
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}
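/* Drive the mac80211 TXQ scheduler for one AC until no more frames can be
 * queued. Only the four data ACs are scheduled here; the PSD queue is filled
 * through mt76_tx() and mt76_release_buffered_frames() instead.
 */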
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
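/* Add @pad bytes of tail padding to an skb; for an A-MSDU fragment chain the
 * padding is added to the last fragment while the total length stored in the
 * first fragment is updated to match.
 */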
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so the length of it and of the last packet needs updating.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
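/* TX token accounting: each in-flight txwi is stored in the token IDR, the TX
 * queues are blocked once fewer than MT76_TOKEN_FREE_THR tokens remain, and
 * mt76_token_release() signals via *wake when they can be unblocked again.
 */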
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);