// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

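/* Record the BlockAck window start for an aggregation-enabled TID: stash
 * the sequence number following this frame's, so a BAR with the right SSN
 * can be sent when the queue is later flushed.
 */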
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

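/* TX status reporting is done in two steps: completed frames are collected
 * on a local list while status_lock is held, then reported to mac80211
 * only after the lock has been dropped.
 */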
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);

			if (status.sta)
				status.rate = &wcid->rate;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

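/* A frame is complete once both the DMA completion and the TX status event
 * have been seen. Merge the newly reported flags and move the frame to the
 * report list when both bits are present.
 */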
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

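/* Allocate a packet ID used to match a later TX status event to this frame
 * and stash the skb in the per-WCID idr. Returns MT_PACKET_ID_NO_ACK or
 * MT_PACKET_ID_NO_SKB when no status report will be matched to an skb.
 */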
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

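/* Fetch the skb matching a TX status event by packet ID. While scanning,
 * expire entries whose DMA completed more than MT_TX_STATUS_SKB_TIMEOUT
 * ago; calling with pktid < 0 flushes every entry for this WCID.
 */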
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

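/* Walk all WCIDs with pending status entries and expire timed-out frames;
 * with flush set, complete everything immediately.
 */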
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

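/* Frames without an airtime estimate are not covered by AQL; decrement the
 * per-WCID count of such in-flight frames, clamping it at zero.
 */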
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

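/* Common DMA completion path: frees testmode frames, reports frames without
 * a packet ID immediately, and marks status-tracked frames as DMA_DONE so
 * they complete once their TX status event arrives (or times out).
 */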
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

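/* Queue a single frame to the hardware queue and, for non-AQL frames,
 * account it against the per-station limit; *stop is set once
 * MT_MAX_NON_AQL_PKT frames are in flight.
 */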
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

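/* Transmit a frame directly, bypassing the mac80211 TXQ scheduler: pick the
 * hardware queue, fill in rates if needed, tag the owning phy and kick the
 * queue.
 */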
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	u8 phy_idx = mt76_get_phy_id(phy);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy_idx);

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	u8 phy_idx = mt76_get_phy_id(phy);
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy_idx);

	return skb;
}

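/* Queue one frame of a powersave service period on the PSD queue; the last
 * frame is tagged with EOSP and a TX status request.
 */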
static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

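/* mac80211 callback releasing up to nframes buffered frames for the given
 * TIDs of a station in powersave. The final frame closes the service
 * period; if nothing was buffered, signal EOSP directly.
 */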
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

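/* Burst-dequeue frames from one TXQ and queue them to hardware until the
 * queue fills up, the non-AQL limit is hit or a reset is pending. Returns
 * the number of frames queued, or a negative error code.
 */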
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

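/* Round-robin over mac80211's TXQs for one AC: send a pending BAR where
 * needed, then burst frames from each eligible TXQ until the hardware
 * queue is full or no TXQ has frames left.
 */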
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	/* only the four WMM data queues go through the mac80211 TXQ scheduler */
	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);
	if (dev->phy3)
		mt76_txq_schedule_all(dev->phy3);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
	if (dev->phy3 && dev->phy3->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy3);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update its length as well as that of the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

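/* Complete a single hardware queue entry: hand the skb back to the driver
 * completion hook and advance the queue tail.
 */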
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

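/* Block or unblock TX on the first hardware queue of every phy; called
 * under token_lock when the token pool runs low or recovers.
 */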
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2, *phy3 = dev->phy3;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (phy3) {
		q2 = phy3->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

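/* Hand out a DMA token for a pending TX descriptor. TX is blocked once
 * fewer than MT76_TOKEN_FREE_THR tokens remain; mt76_token_release()
 * signals the caller to unblock once enough tokens have been returned.
 */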
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);