// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        u8 tid;

        if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
        txq = sta->txq[tid];
        mtxq = (struct mt76_txq *)txq->drv_priv;
        if (!mtxq->aggr)
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
        __acquires(&dev->status_lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
        __releases(&dev->status_lock)
{
        struct ieee80211_hw *hw;
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_lock);

        rcu_read_lock();
        while ((skb = __skb_dequeue(list)) != NULL) {
                struct ieee80211_tx_status status = {
                        .skb = skb,
                        .info = IEEE80211_SKB_CB(skb),
                };
                struct ieee80211_rate_status rs = {};
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
                struct mt76_wcid *wcid;

                wcid = rcu_dereference(dev->wcid[cb->wcid]);
                if (wcid) {
                        status.sta = wcid_to_sta(wcid);
                        if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
                                rs.rate_idx = wcid->rate;
                                status.rates = &rs;
                                status.n_rates = 1;
                        } else {
                                status.n_rates = 0;
                        }
                }

                hw = mt76_tx_status_get_hw(dev, skb);
                spin_lock_bh(&dev->rx_lock);
                ieee80211_tx_status_ext(hw, &status);
                spin_unlock_bh(&dev->rx_lock);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;
        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                info->status.rates[0].count = 0;
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        memset(cb, 0, sizeof(*cb));

        if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
                if (mtk_wed_device_active(&dev->mmio.wed) &&
                    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
                     ieee80211_is_data(hdr->frame_control)))
                        return MT_PACKET_ID_WED;

                return MT_PACKET_ID_NO_SKB;
        }

        spin_lock_bh(&dev->status_lock);

        pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
                        MT_PACKET_ID_MASK, GFP_ATOMIC);
        if (pid < 0) {
                pid = MT_PACKET_ID_NO_SKB;
                goto out;
        }

        cb->wcid = wcid->idx;
        cb->pktid = pid;

        if (list_empty(&wcid->list))
                list_add_tail(&wcid->list, &dev->wcid_list);

out:
        spin_unlock_bh(&dev->status_lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb;
        int id;

        lockdep_assert_held(&dev->status_lock);

        skb = idr_remove(&wcid->pktid, pktid);
        if (skb)
                goto out;

        /* look for stale entries in the wcid idr queue */
        idr_for_each_entry(&wcid->pktid, skb, id) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (pktid >= 0) {
                        if (!(cb->flags & MT_TX_CB_DMA_DONE))
                                continue;

                        if (time_is_after_jiffies(cb->jiffies +
                                                  MT_TX_STATUS_SKB_TIMEOUT))
                                continue;
                }

                /* It has been too long since DMA_DONE, time out this packet
                 * and stop waiting for TXS callback.
                 */
                idr_remove(&wcid->pktid, cb->pktid);
                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

out:
        if (idr_is_empty(&wcid->pktid))
                list_del_init(&wcid->list);

        return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
        struct mt76_wcid *wcid, *tmp;
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
                mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
                      struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int pending;

        if (!wcid || info->tx_time_est)
                return;

        pending = atomic_dec_return(&wcid->non_aql_packets);
        if (pending < 0)
                atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
                            struct list_head *free_list)
{
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        struct ieee80211_tx_status status = {
                .skb = skb,
                .free_list = free_list,
        };
        struct mt76_wcid *wcid = NULL;
        struct ieee80211_hw *hw;
        struct sk_buff_head list;

        rcu_read_lock();

        if (wcid_idx < ARRAY_SIZE(dev->wcid))
                wcid = rcu_dereference(dev->wcid[wcid_idx]);

        mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
                struct mt76_phy *phy = hw->priv;

                if (skb == phy->test.tx_skb)
                        phy->test.tx_done++;
                if (phy->test.tx_queued == phy->test.tx_done)
                        wake_up(&dev->tx_wait);

                dev_kfree_skb_any(skb);
                goto out;
        }
#endif

        if (cb->pktid < MT_PACKET_ID_FIRST) {
                struct ieee80211_rate_status rs = {};

                hw = mt76_tx_status_get_hw(dev, skb);
                status.sta = wcid_to_sta(wcid);
                if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
                        rs.rate_idx = wcid->rate;
                        status.rates = &rs;
                        status.n_rates = 1;
                }
                spin_lock_bh(&dev->rx_lock);
                ieee80211_tx_status_ext(hw, &status);
                spin_unlock_bh(&dev->rx_lock);
                goto out;
        }

        mt76_tx_status_lock(dev, &list);
        cb->jiffies = jiffies;
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
                    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                    bool *stop)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        bool non_aql;
        int pending;
        int idx;

        non_aql = !info->tx_time_est;
        idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
        if (idx < 0 || !sta)
                return idx;

        wcid = (struct mt76_wcid *)sta->drv_priv;
        if (!wcid->sta)
                return idx;

        q->entry[idx].wcid = wcid->idx;

        if (!non_aql)
                return idx;

        pending = atomic_inc_return(&wcid->non_aql_packets);
        if (stop && pending >= MT_MAX_NON_AQL_PKT)
                *stop = true;

        return idx;
}

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct sk_buff_head *head;

        if (mt76_testmode_enabled(phy)) {
                ieee80211_free_txskb(phy->hw, skb);
                return;
        }

        if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
                skb_set_queue_mapping(skb, MT_TXQ_BE);

        if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
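        /* Off-channel frames and frames that must not use the configured
         * rate mask go to a separate per-wcid queue, so they can be
         * scheduled independently of the regular pending traffic.
         */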
        if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
            (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
                head = &wcid->tx_offchannel;
        else
                head = &wcid->tx_pending;

        spin_lock_bh(&head->lock);
        __skb_queue_tail(head, skb);
        spin_unlock_bh(&head->lock);

        spin_lock_bh(&phy->tx_lock);
        if (list_empty(&wcid->tx_list))
                list_add_tail(&wcid->tx_list, &phy->tx_list);
        spin_unlock_bh(&phy->tx_lock);

        mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;

        skb = ieee80211_tx_dequeue(phy->hw, txq);
        if (!skb)
                return NULL;

        info = IEEE80211_SKB_CB(skb);
        info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

        return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(phy, mtxq);
                        if (!skb)
                                break;

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(phy, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(phy, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
        return q->stopped || q->blocked ||
               q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
                    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1;
        bool stop = false;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                return 0;

        if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
                return 0;

        skb = mt76_txq_dequeue(phy, mtxq);
        if (!skb)
                return 0;

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
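        /* Queue the first frame, then keep dequeuing from the same txq until
         * the hardware queue fills up or the non-AQL packet limit stops the
         * burst.
         */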
        spin_lock(&q->lock);
        idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
        spin_unlock(&q->lock);
        if (idx < 0)
                return idx;

        do {
                if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
                        return -EBUSY;

                if (stop || mt76_txq_stopped(q))
                        break;

                skb = mt76_txq_dequeue(phy, mtxq);
                if (!skb)
                        break;

                info = IEEE80211_SKB_CB(skb);
                if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                        ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                               info->control.rates, 1);

                spin_lock(&q->lock);
                idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
                spin_unlock(&q->lock);
                if (idx < 0)
                        break;

                n_frames++;
        } while (1);

        spin_lock(&q->lock);
        dev->queue_ops->kick(dev, q);
        spin_unlock(&q->lock);

        return n_frames;
}

static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_queue *q = phy->q_tx[qid];
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        while (1) {
                int n_frames = 0;

                if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
                        return -EBUSY;

                if (dev->queue_ops->tx_cleanup &&
                    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
                        dev->queue_ops->tx_cleanup(dev, q, false);
                }

                txq = ieee80211_next_txq(phy->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
                if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                }

                if (!mt76_txq_stopped(q))
                        n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

                ieee80211_return_txq(phy->hw, txq, false);

                if (unlikely(n_frames < 0))
                        return n_frames;

                ret += n_frames;
        }

        return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        int len;

        if (qid >= 4 || phy->offchannel)
                return;

        local_bh_disable();
        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(phy->hw, qid);
                len = mt76_txq_schedule_list(phy, qid);
                ieee80211_txq_schedule_end(phy->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
        local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
                               struct sk_buff_head *head)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_sta *sta;
        struct mt76_queue *q;
        struct sk_buff *skb;
        int ret = 0;

        spin_lock(&head->lock);
        while ((skb = skb_peek(head)) != NULL) {
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                int qid = skb_get_queue_mapping(skb);

                if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
                    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
                    !ieee80211_is_data(hdr->frame_control) &&
                    !ieee80211_is_bufferable_mmpdu(skb))
                        qid = MT_TXQ_PSD;

                q = phy->q_tx[qid];
                if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
                        ret = -1;
                        break;
                }

                __skb_unlink(skb, head);
                spin_unlock(&head->lock);

                sta = wcid_to_sta(wcid);
                spin_lock(&q->lock);
                __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
                dev->queue_ops->kick(dev, q);
                spin_unlock(&q->lock);

                spin_lock(&head->lock);
        }
        spin_unlock(&head->lock);

        return ret;
}

static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
        LIST_HEAD(tx_list);

        if (list_empty(&phy->tx_list))
                return;

        local_bh_disable();
        rcu_read_lock();

        spin_lock(&phy->tx_lock);
        list_splice_init(&phy->tx_list, &tx_list);
        while (!list_empty(&tx_list)) {
                struct mt76_wcid *wcid;
                int ret;

                wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
                list_del_init(&wcid->tx_list);

                spin_unlock(&phy->tx_lock);
                ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
                if (ret >= 0 && !phy->offchannel)
                        ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
                spin_lock(&phy->tx_lock);

                if (!skb_queue_empty(&wcid->tx_pending) &&
                    !skb_queue_empty(&wcid->tx_offchannel) &&
                    list_empty(&wcid->tx_list))
                        list_add_tail(&wcid->tx_list, &phy->tx_list);

                if (ret < 0)
                        break;
        }
        spin_unlock(&phy->tx_lock);

        rcu_read_unlock();
        local_bh_enable();
}

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
        int i;

        mt76_txq_schedule_pending(phy);
        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
        struct mt76_phy *phy;
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                phy = dev->phys[i];
                if (!phy)
                        continue;

                mt76_txq_schedule_all(phy);
        }

#ifdef CONFIG_NL80211_TESTMODE
        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                phy = dev->phys[i];
                if (!phy || !phy->test.tx_pending)
                        continue;

                mt76_testmode_tx_pending(phy);
        }
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
        struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

        mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                hwq = phy->q_tx[mt76_txq_get_qid(txq)];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;

        if (!test_bit(MT76_STATE_RUNNING, &phy->state))
                return;

        mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
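/* Pad an skb by the requested number of bytes. For fragmented skbs the
 * padding is appended to the last fragment, while the head skb's total
 * length is kept consistent.
 */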
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
        struct sk_buff *iter, *last = skb;

        /* First packet of an A-MSDU burst keeps track of the whole burst
         * length; the length of both it and the last packet needs updating.
         */
        skb_walk_frags(skb, iter) {
                last = iter;
                if (!iter->next) {
                        skb->data_len += pad;
                        skb->len += pad;
                        break;
                }
        }

        if (skb_pad(last, pad))
                return -ENOMEM;

        __skb_put(last, pad);

        return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
                            struct mt76_queue_entry *e)
{
        if (e->skb)
                dev->drv->tx_complete_skb(dev, e);

        spin_lock_bh(&q->lock);
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
        struct mt76_phy *phy = &dev->phy;
        struct mt76_queue *q = phy->q_tx[0];

        if (blocked == q->blocked)
                return;

        q->blocked = blocked;

        phy = dev->phys[MT_BAND1];
        if (phy) {
                q = phy->q_tx[0];
                q->blocked = blocked;
        }
        phy = dev->phys[MT_BAND2];
        if (phy) {
                q = phy->q_tx[0];
                q->blocked = blocked;
        }

        if (!blocked)
                mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
        int token;

        spin_lock_bh(&dev->token_lock);

        token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
        if (token >= 0)
                dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
        if (mtk_wed_device_active(&dev->mmio.wed) &&
            token >= dev->mmio.wed.wlan.token_start)
                dev->wed_token_count++;
#endif

        if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
                __mt76_set_tx_blocked(dev, true);

        spin_unlock_bh(&dev->token_lock);

        return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
                          struct mt76_txwi_cache *t, dma_addr_t phys)
{
        int token;

        spin_lock_bh(&dev->rx_token_lock);
        token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
                          GFP_ATOMIC);
        if (token >= 0) {
                t->ptr = ptr;
                t->dma_addr = phys;
        }
        spin_unlock_bh(&dev->rx_token_lock);

        return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
        struct mt76_txwi_cache *txwi;

        spin_lock_bh(&dev->token_lock);

        txwi = idr_remove(&dev->token, token);
        if (txwi) {
                dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
                if (mtk_wed_device_active(&dev->mmio.wed) &&
                    token >= dev->mmio.wed.wlan.token_start &&
                    --dev->wed_token_count == 0)
                        wake_up(&dev->tx_wait);
#endif
        }

        if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
            dev->phy.q_tx[0]->blocked)
                *wake = true;

        spin_unlock_bh(&dev->token_lock);

        return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
        struct mt76_txwi_cache *t;

        spin_lock_bh(&dev->rx_token_lock);
        t = idr_remove(&dev->rx_token, token);
        spin_unlock_bh(&dev->rx_token_lock);

        return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);