// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = __mt76_wcid_ptr(dev, cb->wcid);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
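
/* Complete a tx status entry once both the DMA completion and the TXS
 * report (or its failure) have been flagged in the skb cb, then move it
 * to the caller's list for reporting outside of status_lock.
 */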
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED &&
	    (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
		     ieee80211_is_data(hdr->frame_control)))
			return MT_PACKET_ID_WED;

		return MT_PACKET_ID_NO_SKB;
	}

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
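
/* Periodic/flush handler: walk all wcids with outstanding tx status
 * entries and report frames that timed out waiting for a TXS event
 * (all of them when flush is set).
 */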
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	wcid = __mt76_wcid_ptr(dev, wcid_idx);
	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		struct ieee80211_rate_status rs = {};

		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
			rs.rate_idx = wcid->rate;
			status.rates = &rs;
			status.n_rates = 1;
		}
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	if (!wcid->sta)
		return idx;

	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}
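
/* Main per-frame tx entry point, typically called from the driver's
 * ieee80211_ops tx handler: the frame is placed on the wcid's pending
 * (or off-channel) queue and the tx worker is scheduled to service it.
 */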
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff_head *head;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
		skb_set_queue_mapping(skb, MT_TXQ_BE);

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
		head = &wcid->tx_offchannel;
	else
		head = &wcid->tx_pending;

	spin_lock_bh(&head->lock);
	__skb_queue_tail(head, skb);
	spin_unlock_bh(&head->lock);

	spin_lock_bh(&phy->tx_lock);
	if (list_empty(&wcid->tx_list))
		list_add_tail(&wcid->tx_list, &phy->tx_list);
	spin_unlock_bh(&phy->tx_lock);

	mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}
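
/* Dequeue frames from a mac80211 txq and queue them to hardware until the
 * hardware queue fills up, the per-station non-AQL packet limit is hit, or
 * the txq runs dry. Returns the number of queued frames, or a negative
 * error code if the first frame could not be queued.
 */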
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			break;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	struct mt76_queue *q;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = __mt76_wcid_ptr(dev, mtxq->wcid);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		phy = mt76_dev_phy(dev, wcid->phy_idx);
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			continue;

		q = phy->q_tx[qid];
		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
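
/* Flush frames that were queued directly on a wcid (see mt76_tx) to the
 * hardware queues. Returns -1 and leaves the remaining frames queued if
 * the target queue is stopped or the phy is resetting.
 */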
static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
			       struct sk_buff_head *head)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_sta *sta;
	struct mt76_queue *q;
	struct sk_buff *skb;
	int ret = 0;

	spin_lock(&head->lock);
	while ((skb = skb_peek(head)) != NULL) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int qid = skb_get_queue_mapping(skb);

		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		    !ieee80211_is_data(hdr->frame_control) &&
		    (!ieee80211_is_bufferable_mmpdu(skb) ||
		     ieee80211_is_deauth(hdr->frame_control)))
			qid = MT_TXQ_PSD;

		q = phy->q_tx[qid];
		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
			ret = -1;
			break;
		}

		__skb_unlink(skb, head);
		spin_unlock(&head->lock);

		sta = wcid_to_sta(wcid);
		spin_lock(&q->lock);
		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
		dev->queue_ops->kick(dev, q);
		spin_unlock(&q->lock);

		spin_lock(&head->lock);
	}
	spin_unlock(&head->lock);

	return ret;
}

static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
	LIST_HEAD(tx_list);

	if (list_empty(&phy->tx_list))
		return;

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&phy->tx_lock);
	list_splice_init(&phy->tx_list, &tx_list);
	while (!list_empty(&tx_list)) {
		struct mt76_wcid *wcid;
		int ret;

		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
		list_del_init(&wcid->tx_list);

		spin_unlock(&phy->tx_lock);
		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
		if (ret >= 0 && !phy->offchannel)
			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
		spin_lock(&phy->tx_lock);

		if (!skb_queue_empty(&wcid->tx_pending) &&
		    !skb_queue_empty(&wcid->tx_offchannel) &&
		    list_empty(&wcid->tx_list))
			list_add_tail(&wcid->tx_list, &phy->tx_list);

		if (ret < 0)
			break;
	}
	spin_unlock(&phy->tx_lock);

	rcu_read_unlock();
	local_bh_enable();
}

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	struct mt76_phy *main_phy = &phy->dev->phy;
	int i;

	mt76_txq_schedule_pending(phy);

	if (phy != main_phy && phy->hw == main_phy->hw)
		return;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	mt76_txq_schedule_all(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
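
/* Grow the tail of a (possibly fragmented) tx skb by 'pad' bytes of
 * padding, keeping the total length stored in the head skb consistent.
 */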
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
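
/* Return a tx token to the pool; sets *wake when the token count drops
 * back below the free threshold while the tx queues are blocked.
 */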
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);