// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
	__releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
		     ieee80211_is_data(hdr->frame_control)))
			return MT_PACKET_ID_WED;

		return MT_PACKET_ID_NO_SKB;
	}

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

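	/* A negative pktid makes mt76_tx_status_skb_get() time out every
	 * pending entry, so a flush completes all outstanding status
	 * requests immediately.
	 */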
	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		struct ieee80211_rate_status rs = {};

		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
			rs.rate_idx = wcid->rate;
			status.rates = &rs;
			status.n_rates = 1;
		}
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	if (!wcid->sta)
		return idx;

	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff_head *head;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
		skb_set_queue_mapping(skb, MT_TXQ_BE);

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

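	/* Off-channel frames and frames that must ignore the configured rate
	 * mask go to a separate per-wcid queue, which is still serviced while
	 * the phy is off channel.
	 */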
	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
		head = &wcid->tx_offchannel;
	else
		head = &wcid->tx_pending;

	spin_lock_bh(&head->lock);
	__skb_queue_tail(head, skb);
	spin_unlock_bh(&head->lock);

	spin_lock_bh(&phy->tx_lock);
	if (list_empty(&wcid->tx_list))
		list_add_tail(&wcid->tx_list, &phy->tx_list);
	spin_unlock_bh(&phy->tx_lock);

	mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

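	/* Queue the first frame; __mt76_tx_queue_skb() sets 'stop' once the
	 * station has MT_MAX_NON_AQL_PKT frames in flight that are not
	 * covered by AQL accounting.
	 */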
	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			break;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	struct mt76_queue *q;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		phy = mt76_dev_phy(dev, wcid->phy_idx);
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			continue;

		q = phy->q_tx[qid];
		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
			       struct sk_buff_head *head)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_sta *sta;
	struct mt76_queue *q;
	struct sk_buff *skb;
	int ret = 0;

	spin_lock(&head->lock);
	while ((skb = skb_peek(head)) != NULL) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int qid = skb_get_queue_mapping(skb);

		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		    !ieee80211_is_data(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(skb))
			qid = MT_TXQ_PSD;

		q = phy->q_tx[qid];
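		/* Leave the frame queued if the hardware queue is stopped,
		 * blocked, nearly full, or the phy is resetting; it stays on
		 * the pending queue for a later scheduling pass.
		 */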
		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
			ret = -1;
			break;
		}

		__skb_unlink(skb, head);
		spin_unlock(&head->lock);

		sta = wcid_to_sta(wcid);
		spin_lock(&q->lock);
		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
		dev->queue_ops->kick(dev, q);
		spin_unlock(&q->lock);

		spin_lock(&head->lock);
	}
	spin_unlock(&head->lock);

	return ret;
}

static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
	LIST_HEAD(tx_list);

	if (list_empty(&phy->tx_list))
		return;

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&phy->tx_lock);
	list_splice_init(&phy->tx_list, &tx_list);
	while (!list_empty(&tx_list)) {
		struct mt76_wcid *wcid;
		int ret;

		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
		list_del_init(&wcid->tx_list);

		spin_unlock(&phy->tx_lock);
		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
		if (ret >= 0 && !phy->offchannel)
			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
		spin_lock(&phy->tx_lock);

		/* Keep the wcid scheduled as long as either queue still has
		 * frames left.
		 */
		if ((!skb_queue_empty(&wcid->tx_pending) ||
		     !skb_queue_empty(&wcid->tx_offchannel)) &&
		    list_empty(&wcid->tx_list))
			list_add_tail(&wcid->tx_list, &phy->tx_list);

		if (ret < 0)
			break;
	}
	spin_unlock(&phy->tx_lock);

	rcu_read_unlock();
	local_bh_enable();
}

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	struct mt76_phy *main_phy = &phy->dev->phy;
	int i;

	mt76_txq_schedule_pending(phy);

	if (phy != main_phy && phy->hw == main_phy->hw)
		return;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	mt76_txq_schedule_all(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);