/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {  26,  54 },   /* 0: BPSK */
        {  52, 108 },   /* 1: QPSK 1/2 */
        {  78, 162 },   /* 2: QPSK 3/4 */
        { 104, 216 },   /* 3: 16-QAM 1/2 */
        { 156, 324 },   /* 4: 16-QAM 3/4 */
        { 208, 432 },   /* 5: 64-QAM 2/3 */
        { 234, 486 },   /* 6: 64-QAM 3/4 */
        { 260, 540 },   /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
                6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
                9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
                12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
        },
        [MCS_HT20_SGI] = {
                3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
                7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
                10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
                14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
        },
        [MCS_HT40] = {
                6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
                13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
                20004, 40008, 60016, 65532, 65532, 65532,
65532, 65532, 93 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532, 94 }, 95 [MCS_HT40_SGI] = { 96 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532, 97 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532, 98 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532, 99 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532, 100 } 101 }; 102 103 /*********************/ 104 /* Aggregation logic */ 105 /*********************/ 106 107 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) 108 { 109 struct ath_atx_ac *ac = tid->ac; 110 111 if (tid->paused) 112 return; 113 114 if (tid->sched) 115 return; 116 117 tid->sched = true; 118 list_add_tail(&tid->list, &ac->tid_q); 119 120 if (ac->sched) 121 return; 122 123 ac->sched = true; 124 list_add_tail(&ac->list, &txq->axq_acq); 125 } 126 127 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 128 { 129 struct ath_txq *txq = tid->ac->txq; 130 131 WARN_ON(!tid->paused); 132 133 spin_lock_bh(&txq->axq_lock); 134 tid->paused = false; 135 136 if (skb_queue_empty(&tid->buf_q)) 137 goto unlock; 138 139 ath_tx_queue_tid(txq, tid); 140 ath_txq_schedule(sc, txq); 141 unlock: 142 spin_unlock_bh(&txq->axq_lock); 143 } 144 145 static struct ath_frame_info *get_frame_info(struct sk_buff *skb) 146 { 147 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 148 BUILD_BUG_ON(sizeof(struct ath_frame_info) > 149 sizeof(tx_info->rate_driver_data)); 150 return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; 151 } 152 153 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 154 { 155 struct ath_txq *txq = tid->ac->txq; 156 struct sk_buff *skb; 157 struct ath_buf *bf; 158 struct list_head bf_head; 159 struct ath_tx_status ts; 160 struct ath_frame_info *fi; 161 162 INIT_LIST_HEAD(&bf_head); 163 164 memset(&ts, 0, sizeof(ts)); 165 spin_lock_bh(&txq->axq_lock); 166 167 while ((skb = __skb_dequeue(&tid->buf_q))) { 168 fi = get_frame_info(skb); 169 bf = fi->bf; 170 171 spin_unlock_bh(&txq->axq_lock); 172 if (bf && fi->retries) { 173 list_add_tail(&bf->list, &bf_head); 174 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 175 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); 176 } else { 177 ath_tx_send_normal(sc, txq, NULL, skb); 178 } 179 spin_lock_bh(&txq->axq_lock); 180 } 181 182 spin_unlock_bh(&txq->axq_lock); 183 } 184 185 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 186 int seqno) 187 { 188 int index, cindex; 189 190 index = ATH_BA_INDEX(tid->seq_start, seqno); 191 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 192 193 __clear_bit(cindex, tid->tx_buf); 194 195 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 196 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 197 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 198 } 199 } 200 201 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 202 u16 seqno) 203 { 204 int index, cindex; 205 206 index = ATH_BA_INDEX(tid->seq_start, seqno); 207 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 208 __set_bit(cindex, tid->tx_buf); 209 210 if (index >= ((tid->baw_tail - tid->baw_head) & 211 (ATH_TID_MAX_BUFS - 1))) { 212 tid->baw_tail = cindex; 213 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 214 } 215 } 216 217 /* 218 * TODO: For frame(s) that are in the retry state, we will reuse the 219 * sequence number(s) without setting the retry bit. The 220 * alternative is to give up on these and BAR the receiver's window 221 * forward. 
222 */ 223 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, 224 struct ath_atx_tid *tid) 225 226 { 227 struct sk_buff *skb; 228 struct ath_buf *bf; 229 struct list_head bf_head; 230 struct ath_tx_status ts; 231 struct ath_frame_info *fi; 232 233 memset(&ts, 0, sizeof(ts)); 234 INIT_LIST_HEAD(&bf_head); 235 236 while ((skb = __skb_dequeue(&tid->buf_q))) { 237 fi = get_frame_info(skb); 238 bf = fi->bf; 239 240 if (!bf) { 241 spin_unlock(&txq->axq_lock); 242 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); 243 spin_lock(&txq->axq_lock); 244 continue; 245 } 246 247 list_add_tail(&bf->list, &bf_head); 248 249 if (fi->retries) 250 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 251 252 spin_unlock(&txq->axq_lock); 253 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 254 spin_lock(&txq->axq_lock); 255 } 256 257 tid->seq_next = tid->seq_start; 258 tid->baw_tail = tid->baw_head; 259 } 260 261 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 262 struct sk_buff *skb) 263 { 264 struct ath_frame_info *fi = get_frame_info(skb); 265 struct ath_buf *bf = fi->bf; 266 struct ieee80211_hdr *hdr; 267 268 TX_STAT_INC(txq->axq_qnum, a_retries); 269 if (fi->retries++ > 0) 270 return; 271 272 hdr = (struct ieee80211_hdr *)skb->data; 273 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 274 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 275 sizeof(*hdr), DMA_TO_DEVICE); 276 } 277 278 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 279 { 280 struct ath_buf *bf = NULL; 281 282 spin_lock_bh(&sc->tx.txbuflock); 283 284 if (unlikely(list_empty(&sc->tx.txbuf))) { 285 spin_unlock_bh(&sc->tx.txbuflock); 286 return NULL; 287 } 288 289 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 290 list_del(&bf->list); 291 292 spin_unlock_bh(&sc->tx.txbuflock); 293 294 return bf; 295 } 296 297 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf) 298 { 299 spin_lock_bh(&sc->tx.txbuflock); 300 list_add_tail(&bf->list, &sc->tx.txbuf); 301 spin_unlock_bh(&sc->tx.txbuflock); 302 } 303 304 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 305 { 306 struct ath_buf *tbf; 307 308 tbf = ath_tx_get_buffer(sc); 309 if (WARN_ON(!tbf)) 310 return NULL; 311 312 ATH_TXBUF_RESET(tbf); 313 314 tbf->bf_mpdu = bf->bf_mpdu; 315 tbf->bf_buf_addr = bf->bf_buf_addr; 316 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 317 tbf->bf_state = bf->bf_state; 318 319 return tbf; 320 } 321 322 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, 323 struct ath_tx_status *ts, int txok, 324 int *nframes, int *nbad) 325 { 326 struct ath_frame_info *fi; 327 u16 seq_st = 0; 328 u32 ba[WME_BA_BMP_SIZE >> 5]; 329 int ba_index; 330 int isaggr = 0; 331 332 *nbad = 0; 333 *nframes = 0; 334 335 isaggr = bf_isaggr(bf); 336 if (isaggr) { 337 seq_st = ts->ts_seqnum; 338 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 339 } 340 341 while (bf) { 342 fi = get_frame_info(bf->bf_mpdu); 343 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); 344 345 (*nframes)++; 346 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 347 (*nbad)++; 348 349 bf = bf->bf_next; 350 } 351 } 352 353 354 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 355 struct ath_buf *bf, struct list_head *bf_q, 356 struct ath_tx_status *ts, int txok, bool retry) 357 { 358 struct ath_node *an = NULL; 359 struct sk_buff *skb; 360 struct ieee80211_sta *sta; 361 struct ieee80211_hw *hw = sc->hw; 362 struct ieee80211_hdr *hdr; 
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when BA
                         * issue happens. Chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if ((tid->state & AGGR_CLEANUP) || !retry) {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        } else if (flush) {
                                txpending = 1;
                        } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                                if (txok || !an->sleeping)
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);

                                txpending = 1;
                        } else {
                                txfail = 1;
                                sendbar = 1;
                                txfail_cnt++;
                        }
                }

                /*
                 * Make sure the last desc is reclaimed if it is
                 * not a holding desc.
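                 *
                 * (Note: on non-EDMA hardware the last descriptor of a chain
                 * is kept back as a "holding" descriptor, marked bf_stale, so
                 * the hardware can still follow its link pointer; see the
                 * comment in ath_tx_processq() below.)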
482 */ 483 INIT_LIST_HEAD(&bf_head); 484 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || 485 bf_next != NULL || !bf_last->bf_stale) 486 list_move_tail(&bf->list, &bf_head); 487 488 if (!txpending || (tid->state & AGGR_CLEANUP)) { 489 /* 490 * complete the acked-ones/xretried ones; update 491 * block-ack window 492 */ 493 spin_lock_bh(&txq->axq_lock); 494 ath_tx_update_baw(sc, tid, seqno); 495 spin_unlock_bh(&txq->axq_lock); 496 497 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 498 memcpy(tx_info->control.rates, rates, sizeof(rates)); 499 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); 500 rc_update = false; 501 } 502 503 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 504 !txfail, sendbar); 505 } else { 506 /* retry the un-acked ones */ 507 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { 508 if (bf->bf_next == NULL && bf_last->bf_stale) { 509 struct ath_buf *tbf; 510 511 tbf = ath_clone_txbuf(sc, bf_last); 512 /* 513 * Update tx baw and complete the 514 * frame with failed status if we 515 * run out of tx buf. 516 */ 517 if (!tbf) { 518 spin_lock_bh(&txq->axq_lock); 519 ath_tx_update_baw(sc, tid, seqno); 520 spin_unlock_bh(&txq->axq_lock); 521 522 ath_tx_complete_buf(sc, bf, txq, 523 &bf_head, 524 ts, 0, 525 !flush); 526 break; 527 } 528 529 fi->bf = tbf; 530 } 531 } 532 533 /* 534 * Put this buffer to the temporary pending 535 * queue to retain ordering 536 */ 537 __skb_queue_tail(&bf_pending, skb); 538 } 539 540 bf = bf_next; 541 } 542 543 /* prepend un-acked frames to the beginning of the pending frame queue */ 544 if (!skb_queue_empty(&bf_pending)) { 545 if (an->sleeping) 546 ieee80211_sta_set_buffered(sta, tid->tidno, true); 547 548 spin_lock_bh(&txq->axq_lock); 549 skb_queue_splice(&bf_pending, &tid->buf_q); 550 if (!an->sleeping) { 551 ath_tx_queue_tid(txq, tid); 552 553 if (ts->ts_status & ATH9K_TXERR_FILT) 554 tid->ac->clear_ps_filter = true; 555 } 556 spin_unlock_bh(&txq->axq_lock); 557 } 558 559 if (tid->state & AGGR_CLEANUP) { 560 ath_tx_flush_tid(sc, tid); 561 562 if (tid->baw_head == tid->baw_tail) { 563 tid->state &= ~AGGR_ADDBA_COMPLETE; 564 tid->state &= ~AGGR_CLEANUP; 565 } 566 } 567 568 rcu_read_unlock(); 569 570 if (needreset) { 571 RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR); 572 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 573 } 574 } 575 576 static bool ath_lookup_legacy(struct ath_buf *bf) 577 { 578 struct sk_buff *skb; 579 struct ieee80211_tx_info *tx_info; 580 struct ieee80211_tx_rate *rates; 581 int i; 582 583 skb = bf->bf_mpdu; 584 tx_info = IEEE80211_SKB_CB(skb); 585 rates = tx_info->control.rates; 586 587 for (i = 0; i < 4; i++) { 588 if (!rates[i].count || rates[i].idx < 0) 589 break; 590 591 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) 592 return true; 593 } 594 595 return false; 596 } 597 598 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 599 struct ath_atx_tid *tid) 600 { 601 struct sk_buff *skb; 602 struct ieee80211_tx_info *tx_info; 603 struct ieee80211_tx_rate *rates; 604 u32 max_4ms_framelen, frmlen; 605 u16 aggr_limit, legacy = 0; 606 int i; 607 608 skb = bf->bf_mpdu; 609 tx_info = IEEE80211_SKB_CB(skb); 610 rates = tx_info->control.rates; 611 612 /* 613 * Find the lowest frame length among the rate series that will have a 614 * 4ms transmit duration. 615 * TODO - TXOP limit needs to be considered. 
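         *
         * (For example, at single-stream HT20 MCS 7, i.e. 65 Mbit/s with the
         * regular GI, 4 ms of airtime is roughly 32 KB, which matches the
         * 32172 byte entry in ath_max_4ms_framelen above; illustrative
         * numbers only.)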
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit the aggregate size by the minimum rate if the selected rate
         * is not a probe rate; if the selected rate is a probe rate, avoid
         * aggregation of this packet altogether.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, the hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add a delimiter when using RTS/CTS with aggregation
         * on non-enterprise AR9003 cards
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert the desired mpdu density from microseconds to bytes based
         * on the highest rate in the rate series (i.e. the first rate) to
         * determine the required minimum length for a subframe. Take into
         * account whether the high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ?
1 : 0; 721 722 if (half_gi) 723 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); 724 else 725 nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); 726 727 if (nsymbols == 0) 728 nsymbols = 1; 729 730 streams = HT_RC_2_STREAMS(rix); 731 nsymbits = bits_per_symbol[rix % 8][width] * streams; 732 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 733 734 if (frmlen < minlen) { 735 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; 736 ndelim = max(mindelim, ndelim); 737 } 738 739 return ndelim; 740 } 741 742 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 743 struct ath_txq *txq, 744 struct ath_atx_tid *tid, 745 struct list_head *bf_q, 746 int *aggr_len) 747 { 748 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 749 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; 750 int rl = 0, nframes = 0, ndelim, prev_al = 0; 751 u16 aggr_limit = 0, al = 0, bpad = 0, 752 al_delta, h_baw = tid->baw_size / 2; 753 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 754 struct ieee80211_tx_info *tx_info; 755 struct ath_frame_info *fi; 756 struct sk_buff *skb; 757 u16 seqno; 758 759 do { 760 skb = skb_peek(&tid->buf_q); 761 fi = get_frame_info(skb); 762 bf = fi->bf; 763 if (!fi->bf) 764 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 765 766 if (!bf) 767 continue; 768 769 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 770 seqno = bf->bf_state.seqno; 771 if (!bf_first) 772 bf_first = bf; 773 774 /* do not step over block-ack window */ 775 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 776 status = ATH_AGGR_BAW_CLOSED; 777 break; 778 } 779 780 if (!rl) { 781 aggr_limit = ath_lookup_rate(sc, bf, tid); 782 rl = 1; 783 } 784 785 /* do not exceed aggregation limit */ 786 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; 787 788 if (nframes && 789 ((aggr_limit < (al + bpad + al_delta + prev_al)) || 790 ath_lookup_legacy(bf))) { 791 status = ATH_AGGR_LIMITED; 792 break; 793 } 794 795 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 796 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 797 break; 798 799 /* do not exceed subframe limit */ 800 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 801 status = ATH_AGGR_LIMITED; 802 break; 803 } 804 805 /* add padding for previous frame to aggregation length */ 806 al += bpad + al_delta; 807 808 /* 809 * Get the delimiters needed to meet the MPDU 810 * density for this node. 
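                 * (For example, with a 4 us MPDU density and a single-stream
                 * HT20 MCS 7 first rate, one 4 us symbol carries 260 bits, so
                 * subframes shorter than about 32 bytes would need extra
                 * delimiters; illustrative numbers only.)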
811 */ 812 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, 813 !nframes); 814 bpad = PADBYTES(al_delta) + (ndelim << 2); 815 816 nframes++; 817 bf->bf_next = NULL; 818 819 /* link buffers of this frame to the aggregate */ 820 if (!fi->retries) 821 ath_tx_addto_baw(sc, tid, seqno); 822 bf->bf_state.ndelim = ndelim; 823 824 __skb_unlink(skb, &tid->buf_q); 825 list_add_tail(&bf->list, bf_q); 826 if (bf_prev) 827 bf_prev->bf_next = bf; 828 829 bf_prev = bf; 830 831 } while (!skb_queue_empty(&tid->buf_q)); 832 833 *aggr_len = al; 834 835 return status; 836 #undef PADBYTES 837 } 838 839 /* 840 * rix - rate index 841 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 842 * width - 0 for 20 MHz, 1 for 40 MHz 843 * half_gi - to use 4us v/s 3.6 us for symbol time 844 */ 845 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 846 int width, int half_gi, bool shortPreamble) 847 { 848 u32 nbits, nsymbits, duration, nsymbols; 849 int streams; 850 851 /* find number of symbols: PLCP + data */ 852 streams = HT_RC_2_STREAMS(rix); 853 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 854 nsymbits = bits_per_symbol[rix % 8][width] * streams; 855 nsymbols = (nbits + nsymbits - 1) / nsymbits; 856 857 if (!half_gi) 858 duration = SYMBOL_TIME(nsymbols); 859 else 860 duration = SYMBOL_TIME_HALFGI(nsymbols); 861 862 /* addup duration for legacy/ht training and signal fields */ 863 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 864 865 return duration; 866 } 867 868 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 869 struct ath_tx_info *info, int len) 870 { 871 struct ath_hw *ah = sc->sc_ah; 872 struct sk_buff *skb; 873 struct ieee80211_tx_info *tx_info; 874 struct ieee80211_tx_rate *rates; 875 const struct ieee80211_rate *rate; 876 struct ieee80211_hdr *hdr; 877 int i; 878 u8 rix = 0; 879 880 skb = bf->bf_mpdu; 881 tx_info = IEEE80211_SKB_CB(skb); 882 rates = tx_info->control.rates; 883 hdr = (struct ieee80211_hdr *)skb->data; 884 885 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 886 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 887 888 /* 889 * We check if Short Preamble is needed for the CTS rate by 890 * checking the BSS's global flag. 891 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 
892 */ 893 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); 894 info->rtscts_rate = rate->hw_value; 895 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 896 info->rtscts_rate |= rate->hw_value_short; 897 898 for (i = 0; i < 4; i++) { 899 bool is_40, is_sgi, is_sp; 900 int phy; 901 902 if (!rates[i].count || (rates[i].idx < 0)) 903 continue; 904 905 rix = rates[i].idx; 906 info->rates[i].Tries = rates[i].count; 907 908 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 909 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 910 info->flags |= ATH9K_TXDESC_RTSENA; 911 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 912 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 913 info->flags |= ATH9K_TXDESC_CTSENA; 914 } 915 916 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 917 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; 918 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 919 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; 920 921 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); 922 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); 923 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); 924 925 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 926 /* MCS rates */ 927 info->rates[i].Rate = rix | 0x80; 928 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 929 ah->txchainmask, info->rates[i].Rate); 930 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, 931 is_40, is_sgi, is_sp); 932 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 933 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; 934 continue; 935 } 936 937 /* legacy rates */ 938 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 939 !(rate->flags & IEEE80211_RATE_ERP_G)) 940 phy = WLAN_RC_PHY_CCK; 941 else 942 phy = WLAN_RC_PHY_OFDM; 943 944 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 945 info->rates[i].Rate = rate->hw_value; 946 if (rate->hw_value_short) { 947 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 948 info->rates[i].Rate |= rate->hw_value_short; 949 } else { 950 is_sp = false; 951 } 952 953 if (bf->bf_state.bfs_paprd) 954 info->rates[i].ChSel = ah->txchainmask; 955 else 956 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 957 ah->txchainmask, info->rates[i].Rate); 958 959 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 960 phy, rate->bitrate * 100, len, rix, is_sp); 961 } 962 963 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 964 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) 965 info->flags &= ~ATH9K_TXDESC_RTSENA; 966 967 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 
*/ 968 if (info->flags & ATH9K_TXDESC_RTSENA) 969 info->flags &= ~ATH9K_TXDESC_CTSENA; 970 } 971 972 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 973 { 974 struct ieee80211_hdr *hdr; 975 enum ath9k_pkt_type htype; 976 __le16 fc; 977 978 hdr = (struct ieee80211_hdr *)skb->data; 979 fc = hdr->frame_control; 980 981 if (ieee80211_is_beacon(fc)) 982 htype = ATH9K_PKT_TYPE_BEACON; 983 else if (ieee80211_is_probe_resp(fc)) 984 htype = ATH9K_PKT_TYPE_PROBE_RESP; 985 else if (ieee80211_is_atim(fc)) 986 htype = ATH9K_PKT_TYPE_ATIM; 987 else if (ieee80211_is_pspoll(fc)) 988 htype = ATH9K_PKT_TYPE_PSPOLL; 989 else 990 htype = ATH9K_PKT_TYPE_NORMAL; 991 992 return htype; 993 } 994 995 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, 996 struct ath_txq *txq, int len) 997 { 998 struct ath_hw *ah = sc->sc_ah; 999 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1000 struct ath_buf *bf_first = bf; 1001 struct ath_tx_info info; 1002 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); 1003 1004 memset(&info, 0, sizeof(info)); 1005 info.is_first = true; 1006 info.is_last = true; 1007 info.txpower = MAX_RATE_POWER; 1008 info.qcu = txq->axq_qnum; 1009 1010 info.flags = ATH9K_TXDESC_INTREQ; 1011 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1012 info.flags |= ATH9K_TXDESC_NOACK; 1013 if (tx_info->flags & IEEE80211_TX_CTL_LDPC) 1014 info.flags |= ATH9K_TXDESC_LDPC; 1015 1016 ath_buf_set_rate(sc, bf, &info, len); 1017 1018 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) 1019 info.flags |= ATH9K_TXDESC_CLRDMASK; 1020 1021 if (bf->bf_state.bfs_paprd) 1022 info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; 1023 1024 1025 while (bf) { 1026 struct sk_buff *skb = bf->bf_mpdu; 1027 struct ath_frame_info *fi = get_frame_info(skb); 1028 1029 info.type = get_hw_packet_type(skb); 1030 if (bf->bf_next) 1031 info.link = bf->bf_next->bf_daddr; 1032 else 1033 info.link = 0; 1034 1035 info.buf_addr[0] = bf->bf_buf_addr; 1036 info.buf_len[0] = skb->len; 1037 info.pkt_len = fi->framelen; 1038 info.keyix = fi->keyix; 1039 info.keytype = fi->keytype; 1040 1041 if (aggr) { 1042 if (bf == bf_first) 1043 info.aggr = AGGR_BUF_FIRST; 1044 else if (!bf->bf_next) 1045 info.aggr = AGGR_BUF_LAST; 1046 else 1047 info.aggr = AGGR_BUF_MIDDLE; 1048 1049 info.ndelim = bf->bf_state.ndelim; 1050 info.aggr_len = len; 1051 } 1052 1053 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 1054 bf = bf->bf_next; 1055 } 1056 } 1057 1058 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 1059 struct ath_atx_tid *tid) 1060 { 1061 struct ath_buf *bf; 1062 enum ATH_AGGR_STATUS status; 1063 struct ieee80211_tx_info *tx_info; 1064 struct list_head bf_q; 1065 int aggr_len; 1066 1067 do { 1068 if (skb_queue_empty(&tid->buf_q)) 1069 return; 1070 1071 INIT_LIST_HEAD(&bf_q); 1072 1073 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); 1074 1075 /* 1076 * no frames picked up to be aggregated; 1077 * block-ack window is not open. 
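                 * (Typically ath_tx_form_aggr() returned ATH_AGGR_BAW_CLOSED
                 * before linking any buffers.)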
1078 */ 1079 if (list_empty(&bf_q)) 1080 break; 1081 1082 bf = list_first_entry(&bf_q, struct ath_buf, list); 1083 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 1084 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1085 1086 if (tid->ac->clear_ps_filter) { 1087 tid->ac->clear_ps_filter = false; 1088 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1089 } else { 1090 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 1091 } 1092 1093 /* if only one frame, send as non-aggregate */ 1094 if (bf == bf->bf_lastbf) { 1095 aggr_len = get_frame_info(bf->bf_mpdu)->framelen; 1096 bf->bf_state.bf_type = BUF_AMPDU; 1097 } else { 1098 TX_STAT_INC(txq->axq_qnum, a_aggr); 1099 } 1100 1101 ath_tx_fill_desc(sc, bf, txq, aggr_len); 1102 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1103 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && 1104 status != ATH_AGGR_BAW_CLOSED); 1105 } 1106 1107 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1108 u16 tid, u16 *ssn) 1109 { 1110 struct ath_atx_tid *txtid; 1111 struct ath_node *an; 1112 1113 an = (struct ath_node *)sta->drv_priv; 1114 txtid = ATH_AN_2_TID(an, tid); 1115 1116 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE)) 1117 return -EAGAIN; 1118 1119 txtid->state |= AGGR_ADDBA_PROGRESS; 1120 txtid->paused = true; 1121 *ssn = txtid->seq_start = txtid->seq_next; 1122 1123 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1124 txtid->baw_head = txtid->baw_tail = 0; 1125 1126 return 0; 1127 } 1128 1129 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1130 { 1131 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1132 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1133 struct ath_txq *txq = txtid->ac->txq; 1134 1135 if (txtid->state & AGGR_CLEANUP) 1136 return; 1137 1138 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1139 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1140 return; 1141 } 1142 1143 spin_lock_bh(&txq->axq_lock); 1144 txtid->paused = true; 1145 1146 /* 1147 * If frames are still being transmitted for this TID, they will be 1148 * cleaned up during tx completion. To prevent race conditions, this 1149 * TID can only be reused after all in-progress subframes have been 1150 * completed. 
1151 */ 1152 if (txtid->baw_head != txtid->baw_tail) 1153 txtid->state |= AGGR_CLEANUP; 1154 else 1155 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1156 spin_unlock_bh(&txq->axq_lock); 1157 1158 ath_tx_flush_tid(sc, txtid); 1159 } 1160 1161 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1162 struct ath_node *an) 1163 { 1164 struct ath_atx_tid *tid; 1165 struct ath_atx_ac *ac; 1166 struct ath_txq *txq; 1167 bool buffered; 1168 int tidno; 1169 1170 for (tidno = 0, tid = &an->tid[tidno]; 1171 tidno < WME_NUM_TID; tidno++, tid++) { 1172 1173 if (!tid->sched) 1174 continue; 1175 1176 ac = tid->ac; 1177 txq = ac->txq; 1178 1179 spin_lock_bh(&txq->axq_lock); 1180 1181 buffered = !skb_queue_empty(&tid->buf_q); 1182 1183 tid->sched = false; 1184 list_del(&tid->list); 1185 1186 if (ac->sched) { 1187 ac->sched = false; 1188 list_del(&ac->list); 1189 } 1190 1191 spin_unlock_bh(&txq->axq_lock); 1192 1193 ieee80211_sta_set_buffered(sta, tidno, buffered); 1194 } 1195 } 1196 1197 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) 1198 { 1199 struct ath_atx_tid *tid; 1200 struct ath_atx_ac *ac; 1201 struct ath_txq *txq; 1202 int tidno; 1203 1204 for (tidno = 0, tid = &an->tid[tidno]; 1205 tidno < WME_NUM_TID; tidno++, tid++) { 1206 1207 ac = tid->ac; 1208 txq = ac->txq; 1209 1210 spin_lock_bh(&txq->axq_lock); 1211 ac->clear_ps_filter = true; 1212 1213 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { 1214 ath_tx_queue_tid(txq, tid); 1215 ath_txq_schedule(sc, txq); 1216 } 1217 1218 spin_unlock_bh(&txq->axq_lock); 1219 } 1220 } 1221 1222 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1223 { 1224 struct ath_atx_tid *txtid; 1225 struct ath_node *an; 1226 1227 an = (struct ath_node *)sta->drv_priv; 1228 1229 if (sc->sc_flags & SC_OP_TXAGGR) { 1230 txtid = ATH_AN_2_TID(an, tid); 1231 txtid->baw_size = 1232 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1233 txtid->state |= AGGR_ADDBA_COMPLETE; 1234 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1235 ath_tx_resume_tid(sc, txtid); 1236 } 1237 } 1238 1239 /********************/ 1240 /* Queue Management */ 1241 /********************/ 1242 1243 static void ath_txq_drain_pending_buffers(struct ath_softc *sc, 1244 struct ath_txq *txq) 1245 { 1246 struct ath_atx_ac *ac, *ac_tmp; 1247 struct ath_atx_tid *tid, *tid_tmp; 1248 1249 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1250 list_del(&ac->list); 1251 ac->sched = false; 1252 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { 1253 list_del(&tid->list); 1254 tid->sched = false; 1255 ath_tid_drain(sc, txq, tid); 1256 } 1257 } 1258 } 1259 1260 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 1261 { 1262 struct ath_hw *ah = sc->sc_ah; 1263 struct ath9k_tx_queue_info qi; 1264 static const int subtype_txq_to_hwq[] = { 1265 [WME_AC_BE] = ATH_TXQ_AC_BE, 1266 [WME_AC_BK] = ATH_TXQ_AC_BK, 1267 [WME_AC_VI] = ATH_TXQ_AC_VI, 1268 [WME_AC_VO] = ATH_TXQ_AC_VO, 1269 }; 1270 int axq_qnum, i; 1271 1272 memset(&qi, 0, sizeof(qi)); 1273 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 1274 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 1275 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 1276 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 1277 qi.tqi_physCompBuf = 0; 1278 1279 /* 1280 * Enable interrupts only for EOL and DESC conditions. 1281 * We mark tx descriptors to receive a DESC interrupt 1282 * when a tx queue gets deep; otherwise waiting for the 1283 * EOL to reap descriptors. 
Note that this is done to 1284 * reduce interrupt load and this only defers reaping 1285 * descriptors, never transmitting frames. Aside from 1286 * reducing interrupts this also permits more concurrency. 1287 * The only potential downside is if the tx queue backs 1288 * up in which case the top half of the kernel may backup 1289 * due to a lack of tx descriptors. 1290 * 1291 * The UAPSD queue is an exception, since we take a desc- 1292 * based intr on the EOSP frames. 1293 */ 1294 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1295 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE | 1296 TXQ_FLAG_TXERRINT_ENABLE; 1297 } else { 1298 if (qtype == ATH9K_TX_QUEUE_UAPSD) 1299 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 1300 else 1301 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 1302 TXQ_FLAG_TXDESCINT_ENABLE; 1303 } 1304 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 1305 if (axq_qnum == -1) { 1306 /* 1307 * NB: don't print a message, this happens 1308 * normally on parts with too few tx queues 1309 */ 1310 return NULL; 1311 } 1312 if (!ATH_TXQ_SETUP(sc, axq_qnum)) { 1313 struct ath_txq *txq = &sc->tx.txq[axq_qnum]; 1314 1315 txq->axq_qnum = axq_qnum; 1316 txq->mac80211_qnum = -1; 1317 txq->axq_link = NULL; 1318 INIT_LIST_HEAD(&txq->axq_q); 1319 INIT_LIST_HEAD(&txq->axq_acq); 1320 spin_lock_init(&txq->axq_lock); 1321 txq->axq_depth = 0; 1322 txq->axq_ampdu_depth = 0; 1323 txq->axq_tx_inprogress = false; 1324 sc->tx.txqsetup |= 1<<axq_qnum; 1325 1326 txq->txq_headidx = txq->txq_tailidx = 0; 1327 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1328 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1329 } 1330 return &sc->tx.txq[axq_qnum]; 1331 } 1332 1333 int ath_txq_update(struct ath_softc *sc, int qnum, 1334 struct ath9k_tx_queue_info *qinfo) 1335 { 1336 struct ath_hw *ah = sc->sc_ah; 1337 int error = 0; 1338 struct ath9k_tx_queue_info qi; 1339 1340 if (qnum == sc->beacon.beaconq) { 1341 /* 1342 * XXX: for beacon queue, we just save the parameter. 1343 * It will be picked up by ath_beaconq_config when 1344 * it's necessary. 1345 */ 1346 sc->beacon.beacon_qi = *qinfo; 1347 return 0; 1348 } 1349 1350 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); 1351 1352 ath9k_hw_get_txq_props(ah, qnum, &qi); 1353 qi.tqi_aifs = qinfo->tqi_aifs; 1354 qi.tqi_cwmin = qinfo->tqi_cwmin; 1355 qi.tqi_cwmax = qinfo->tqi_cwmax; 1356 qi.tqi_burstTime = qinfo->tqi_burstTime; 1357 qi.tqi_readyTime = qinfo->tqi_readyTime; 1358 1359 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 1360 ath_err(ath9k_hw_common(sc->sc_ah), 1361 "Unable to update hardware queue %u!\n", qnum); 1362 error = -EIO; 1363 } else { 1364 ath9k_hw_resettxqueue(ah, qnum); 1365 } 1366 1367 return error; 1368 } 1369 1370 int ath_cabq_update(struct ath_softc *sc) 1371 { 1372 struct ath9k_tx_queue_info qi; 1373 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 1374 int qnum = sc->beacon.cabq->axq_qnum; 1375 1376 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1377 /* 1378 * Ensure the readytime % is within the bounds. 
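         * The value is a percentage of the beacon interval; it is clamped to
         * the [ATH9K_READY_TIME_LO_BOUND, ATH9K_READY_TIME_HI_BOUND] range
         * before tqi_readyTime is derived from it below.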
1379 */ 1380 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 1381 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 1382 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1383 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1384 1385 qi.tqi_readyTime = (cur_conf->beacon_interval * 1386 sc->config.cabqReadytime) / 100; 1387 ath_txq_update(sc, qnum, &qi); 1388 1389 return 0; 1390 } 1391 1392 static bool bf_is_ampdu_not_probing(struct ath_buf *bf) 1393 { 1394 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); 1395 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 1396 } 1397 1398 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, 1399 struct list_head *list, bool retry_tx) 1400 __releases(txq->axq_lock) 1401 __acquires(txq->axq_lock) 1402 { 1403 struct ath_buf *bf, *lastbf; 1404 struct list_head bf_head; 1405 struct ath_tx_status ts; 1406 1407 memset(&ts, 0, sizeof(ts)); 1408 ts.ts_status = ATH9K_TX_FLUSH; 1409 INIT_LIST_HEAD(&bf_head); 1410 1411 while (!list_empty(list)) { 1412 bf = list_first_entry(list, struct ath_buf, list); 1413 1414 if (bf->bf_stale) { 1415 list_del(&bf->list); 1416 1417 ath_tx_return_buffer(sc, bf); 1418 continue; 1419 } 1420 1421 lastbf = bf->bf_lastbf; 1422 list_cut_position(&bf_head, list, &lastbf->list); 1423 1424 txq->axq_depth--; 1425 if (bf_is_ampdu_not_probing(bf)) 1426 txq->axq_ampdu_depth--; 1427 1428 spin_unlock_bh(&txq->axq_lock); 1429 if (bf_isampdu(bf)) 1430 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, 1431 retry_tx); 1432 else 1433 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 1434 spin_lock_bh(&txq->axq_lock); 1435 } 1436 } 1437 1438 /* 1439 * Drain a given TX queue (could be Beacon or Data) 1440 * 1441 * This assumes output has been stopped and 1442 * we do not need to block ath_tx_tasklet. 1443 */ 1444 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) 1445 { 1446 spin_lock_bh(&txq->axq_lock); 1447 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1448 int idx = txq->txq_tailidx; 1449 1450 while (!list_empty(&txq->txq_fifo[idx])) { 1451 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx], 1452 retry_tx); 1453 1454 INCR(idx, ATH_TXFIFO_DEPTH); 1455 } 1456 txq->txq_tailidx = idx; 1457 } 1458 1459 txq->axq_link = NULL; 1460 txq->axq_tx_inprogress = false; 1461 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx); 1462 1463 /* flush any pending frames if aggregation is enabled */ 1464 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx) 1465 ath_txq_drain_pending_buffers(sc, txq); 1466 1467 spin_unlock_bh(&txq->axq_lock); 1468 } 1469 1470 bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1471 { 1472 struct ath_hw *ah = sc->sc_ah; 1473 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1474 struct ath_txq *txq; 1475 int i; 1476 u32 npend = 0; 1477 1478 if (sc->sc_flags & SC_OP_INVALID) 1479 return true; 1480 1481 ath9k_hw_abort_tx_dma(ah); 1482 1483 /* Check if any queue remains active */ 1484 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1485 if (!ATH_TXQ_SETUP(sc, i)) 1486 continue; 1487 1488 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) 1489 npend |= BIT(i); 1490 } 1491 1492 if (npend) 1493 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); 1494 1495 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1496 if (!ATH_TXQ_SETUP(sc, i)) 1497 continue; 1498 1499 /* 1500 * The caller will resume queues with ieee80211_wake_queues. 
1501 * Mark the queue as not stopped to prevent ath_tx_complete 1502 * from waking the queue too early. 1503 */ 1504 txq = &sc->tx.txq[i]; 1505 txq->stopped = false; 1506 ath_draintxq(sc, txq, retry_tx); 1507 } 1508 1509 return !npend; 1510 } 1511 1512 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1513 { 1514 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1515 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1516 } 1517 1518 /* For each axq_acq entry, for each tid, try to schedule packets 1519 * for transmit until ampdu_depth has reached min Q depth. 1520 */ 1521 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1522 { 1523 struct ath_atx_ac *ac, *ac_tmp, *last_ac; 1524 struct ath_atx_tid *tid, *last_tid; 1525 1526 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) || 1527 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1528 return; 1529 1530 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1531 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1532 1533 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1534 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); 1535 list_del(&ac->list); 1536 ac->sched = false; 1537 1538 while (!list_empty(&ac->tid_q)) { 1539 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, 1540 list); 1541 list_del(&tid->list); 1542 tid->sched = false; 1543 1544 if (tid->paused) 1545 continue; 1546 1547 ath_tx_sched_aggr(sc, txq, tid); 1548 1549 /* 1550 * add tid to round-robin queue if more frames 1551 * are pending for the tid 1552 */ 1553 if (!skb_queue_empty(&tid->buf_q)) 1554 ath_tx_queue_tid(txq, tid); 1555 1556 if (tid == last_tid || 1557 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1558 break; 1559 } 1560 1561 if (!list_empty(&ac->tid_q)) { 1562 if (!ac->sched) { 1563 ac->sched = true; 1564 list_add_tail(&ac->list, &txq->axq_acq); 1565 } 1566 } 1567 1568 if (ac == last_ac || 1569 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1570 return; 1571 } 1572 } 1573 1574 /***********/ 1575 /* TX, DMA */ 1576 /***********/ 1577 1578 /* 1579 * Insert a chain of ath_buf (descriptors) on a txq and 1580 * assume the descriptors are already chained together by caller. 1581 */ 1582 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1583 struct list_head *head, bool internal) 1584 { 1585 struct ath_hw *ah = sc->sc_ah; 1586 struct ath_common *common = ath9k_hw_common(ah); 1587 struct ath_buf *bf, *bf_last; 1588 bool puttxbuf = false; 1589 bool edma; 1590 1591 /* 1592 * Insert the frame on the outbound list and 1593 * pass it on to the hardware. 
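         *
         * On EDMA (AR9003-family) hardware the buffers go into one of the
         * ATH_TXFIFO_DEPTH FIFO slots and are handed over with
         * ath9k_hw_puttxbuf(); on older hardware they are appended to axq_q
         * and chained to the previous descriptor through axq_link.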
1594 */ 1595 1596 if (list_empty(head)) 1597 return; 1598 1599 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1600 bf = list_first_entry(head, struct ath_buf, list); 1601 bf_last = list_entry(head->prev, struct ath_buf, list); 1602 1603 ath_dbg(common, ATH_DBG_QUEUE, 1604 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1605 1606 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { 1607 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); 1608 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1609 puttxbuf = true; 1610 } else { 1611 list_splice_tail_init(head, &txq->axq_q); 1612 1613 if (txq->axq_link) { 1614 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); 1615 ath_dbg(common, ATH_DBG_XMIT, 1616 "link[%u] (%p)=%llx (%p)\n", 1617 txq->axq_qnum, txq->axq_link, 1618 ito64(bf->bf_daddr), bf->bf_desc); 1619 } else if (!edma) 1620 puttxbuf = true; 1621 1622 txq->axq_link = bf_last->bf_desc; 1623 } 1624 1625 if (puttxbuf) { 1626 TX_STAT_INC(txq->axq_qnum, puttxbuf); 1627 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1628 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", 1629 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1630 } 1631 1632 if (!edma) { 1633 TX_STAT_INC(txq->axq_qnum, txstart); 1634 ath9k_hw_txstart(ah, txq->axq_qnum); 1635 } 1636 1637 if (!internal) { 1638 txq->axq_depth++; 1639 if (bf_is_ampdu_not_probing(bf)) 1640 txq->axq_ampdu_depth++; 1641 } 1642 } 1643 1644 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1645 struct sk_buff *skb, struct ath_tx_control *txctl) 1646 { 1647 struct ath_frame_info *fi = get_frame_info(skb); 1648 struct list_head bf_head; 1649 struct ath_buf *bf; 1650 1651 /* 1652 * Do not queue to h/w when any of the following conditions is true: 1653 * - there are pending frames in software queue 1654 * - the TID is currently paused for ADDBA/BAR request 1655 * - seqno is not within block-ack window 1656 * - h/w queue depth exceeds low water mark 1657 */ 1658 if (!skb_queue_empty(&tid->buf_q) || tid->paused || 1659 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || 1660 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { 1661 /* 1662 * Add this frame to software queue for scheduling later 1663 * for aggregation. 
1664 */ 1665 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); 1666 __skb_queue_tail(&tid->buf_q, skb); 1667 if (!txctl->an || !txctl->an->sleeping) 1668 ath_tx_queue_tid(txctl->txq, tid); 1669 return; 1670 } 1671 1672 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1673 if (!bf) 1674 return; 1675 1676 bf->bf_state.bf_type = BUF_AMPDU; 1677 INIT_LIST_HEAD(&bf_head); 1678 list_add(&bf->list, &bf_head); 1679 1680 /* Add sub-frame to BAW */ 1681 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1682 1683 /* Queue to h/w without aggregation */ 1684 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); 1685 bf->bf_lastbf = bf; 1686 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); 1687 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); 1688 } 1689 1690 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1691 struct ath_atx_tid *tid, struct sk_buff *skb) 1692 { 1693 struct ath_frame_info *fi = get_frame_info(skb); 1694 struct list_head bf_head; 1695 struct ath_buf *bf; 1696 1697 bf = fi->bf; 1698 if (!bf) 1699 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1700 1701 if (!bf) 1702 return; 1703 1704 INIT_LIST_HEAD(&bf_head); 1705 list_add_tail(&bf->list, &bf_head); 1706 bf->bf_state.bf_type = 0; 1707 1708 /* update starting sequence number for subsequent ADDBA request */ 1709 if (tid) 1710 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1711 1712 bf->bf_lastbf = bf; 1713 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1714 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1715 TX_STAT_INC(txq->axq_qnum, queued); 1716 } 1717 1718 static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1719 int framelen) 1720 { 1721 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1722 struct ieee80211_sta *sta = tx_info->control.sta; 1723 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1724 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1725 struct ath_frame_info *fi = get_frame_info(skb); 1726 struct ath_node *an = NULL; 1727 enum ath9k_key_type keytype; 1728 1729 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1730 1731 if (sta) 1732 an = (struct ath_node *) sta->drv_priv; 1733 1734 memset(fi, 0, sizeof(*fi)); 1735 if (hw_key) 1736 fi->keyix = hw_key->hw_key_idx; 1737 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0) 1738 fi->keyix = an->ps_key; 1739 else 1740 fi->keyix = ATH9K_TXKEYIX_INVALID; 1741 fi->keytype = keytype; 1742 fi->framelen = framelen; 1743 } 1744 1745 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) 1746 { 1747 struct ath_hw *ah = sc->sc_ah; 1748 struct ath9k_channel *curchan = ah->curchan; 1749 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1750 (curchan->channelFlags & CHANNEL_5GHZ) && 1751 (chainmask == 0x7) && (rate < 0x90)) 1752 return 0x3; 1753 else 1754 return chainmask; 1755 } 1756 1757 /* 1758 * Assign a descriptor (and sequence number if necessary, 1759 * and map buffer for DMA. 
Frees skb on error 1760 */ 1761 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1762 struct ath_txq *txq, 1763 struct ath_atx_tid *tid, 1764 struct sk_buff *skb) 1765 { 1766 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1767 struct ath_frame_info *fi = get_frame_info(skb); 1768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1769 struct ath_buf *bf; 1770 u16 seqno; 1771 1772 bf = ath_tx_get_buffer(sc); 1773 if (!bf) { 1774 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n"); 1775 goto error; 1776 } 1777 1778 ATH_TXBUF_RESET(bf); 1779 1780 if (tid) { 1781 seqno = tid->seq_next; 1782 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1783 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1784 bf->bf_state.seqno = seqno; 1785 } 1786 1787 bf->bf_mpdu = skb; 1788 1789 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1790 skb->len, DMA_TO_DEVICE); 1791 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 1792 bf->bf_mpdu = NULL; 1793 bf->bf_buf_addr = 0; 1794 ath_err(ath9k_hw_common(sc->sc_ah), 1795 "dma_mapping_error() on TX\n"); 1796 ath_tx_return_buffer(sc, bf); 1797 goto error; 1798 } 1799 1800 fi->bf = bf; 1801 1802 return bf; 1803 1804 error: 1805 dev_kfree_skb_any(skb); 1806 return NULL; 1807 } 1808 1809 /* FIXME: tx power */ 1810 static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, 1811 struct ath_tx_control *txctl) 1812 { 1813 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1814 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1815 struct ath_atx_tid *tid = NULL; 1816 struct ath_buf *bf; 1817 u8 tidno; 1818 1819 spin_lock_bh(&txctl->txq->axq_lock); 1820 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && 1821 ieee80211_is_data_qos(hdr->frame_control)) { 1822 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1823 IEEE80211_QOS_CTL_TID_MASK; 1824 tid = ATH_AN_2_TID(txctl->an, tidno); 1825 1826 WARN_ON(tid->ac->txq != txctl->txq); 1827 } 1828 1829 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) { 1830 /* 1831 * Try aggregation if it's a unicast data frame 1832 * and the destination is HT capable. 1833 */ 1834 ath_tx_send_ampdu(sc, tid, skb, txctl); 1835 } else { 1836 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1837 if (!bf) 1838 goto out; 1839 1840 bf->bf_state.bfs_paprd = txctl->paprd; 1841 1842 if (txctl->paprd) 1843 bf->bf_state.bfs_paprd_timestamp = jiffies; 1844 1845 ath_tx_send_normal(sc, txctl->txq, tid, skb); 1846 } 1847 1848 out: 1849 spin_unlock_bh(&txctl->txq->axq_lock); 1850 } 1851 1852 /* Upon failure caller should free skb */ 1853 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1854 struct ath_tx_control *txctl) 1855 { 1856 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1857 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1858 struct ieee80211_sta *sta = info->control.sta; 1859 struct ieee80211_vif *vif = info->control.vif; 1860 struct ath_softc *sc = hw->priv; 1861 struct ath_txq *txq = txctl->txq; 1862 int padpos, padsize; 1863 int frmlen = skb->len + FCS_LEN; 1864 int q; 1865 1866 /* NOTE: sta can be NULL according to net/mac80211.h */ 1867 if (sta) 1868 txctl->an = (struct ath_node *)sta->drv_priv; 1869 1870 if (info->control.hw_key) 1871 frmlen += info->control.hw_key->icv_len; 1872 1873 /* 1874 * As a temporary workaround, assign seq# here; this will likely need 1875 * to be cleaned up to work better with Beacon transmission and virtual 1876 * BSSes. 
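         *
         * sc->tx.seq_no is advanced in steps of 0x10 because the sequence
         * number lives in the upper 12 bits of seq_ctrl (the low 4 bits are
         * the fragment number), so adding 0x10 bumps the sequence number by
         * one.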
         */
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                        sc->tx.seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }

        /* Add the padding after the header if this is not already done */
        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos) {
                if (skb_headroom(skb) < padsize)
                        return -ENOMEM;

                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
                hdr = (struct ieee80211_hdr *) skb->data;
        }

        if ((vif && vif->type != NL80211_IFTYPE_AP &&
             vif->type != NL80211_IFTYPE_AP_VLAN) ||
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

        setup_frame_info(hw, skb, frmlen);

        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
         * info are no longer valid (overwritten by the ath_frame_info data).
         */

        q = skb_get_queue_mapping(skb);
        spin_lock_bh(&txq->axq_lock);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_start_dma(sc, skb, txctl);
        return 0;
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int q, padpos, padsize;

        ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

        if (tx_flags & ATH_TX_BAR)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;

        padpos = ath9k_cmn_padpos(hdr->frame_control);
        padsize = padpos & 3;
        if (padsize && skb->len > padpos + padsize) {
                /*
                 * Remove MAC header padding before giving the frame back to
                 * mac80211.
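                 *
                 * The padding was added in ath_tx_start() to 4-byte align the
                 * frame body after the 802.11 header; e.g. a 26 byte QoS data
                 * header gives padpos = 26 and padsize = 2 (illustrative
                 * values).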
1950 */ 1951 memmove(skb->data + padsize, skb->data, padpos); 1952 skb_pull(skb, padsize); 1953 } 1954 1955 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { 1956 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 1957 ath_dbg(common, ATH_DBG_PS, 1958 "Going back to sleep after having received TX status (0x%lx)\n", 1959 sc->ps_flags & (PS_WAIT_FOR_BEACON | 1960 PS_WAIT_FOR_CAB | 1961 PS_WAIT_FOR_PSPOLL_DATA | 1962 PS_WAIT_FOR_TX_ACK)); 1963 } 1964 1965 q = skb_get_queue_mapping(skb); 1966 if (txq == sc->tx.txq_map[q]) { 1967 spin_lock_bh(&txq->axq_lock); 1968 if (WARN_ON(--txq->pending_frames < 0)) 1969 txq->pending_frames = 0; 1970 1971 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { 1972 ieee80211_wake_queue(sc->hw, q); 1973 txq->stopped = 0; 1974 } 1975 spin_unlock_bh(&txq->axq_lock); 1976 } 1977 1978 ieee80211_tx_status(hw, skb); 1979 } 1980 1981 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1982 struct ath_txq *txq, struct list_head *bf_q, 1983 struct ath_tx_status *ts, int txok, int sendbar) 1984 { 1985 struct sk_buff *skb = bf->bf_mpdu; 1986 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1987 unsigned long flags; 1988 int tx_flags = 0; 1989 1990 if (sendbar) 1991 tx_flags = ATH_TX_BAR; 1992 1993 if (!txok) 1994 tx_flags |= ATH_TX_ERROR; 1995 1996 if (ts->ts_status & ATH9K_TXERR_FILT) 1997 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1998 1999 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE); 2000 bf->bf_buf_addr = 0; 2001 2002 if (bf->bf_state.bfs_paprd) { 2003 if (time_after(jiffies, 2004 bf->bf_state.bfs_paprd_timestamp + 2005 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 2006 dev_kfree_skb_any(skb); 2007 else 2008 complete(&sc->paprd_complete); 2009 } else { 2010 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags); 2011 ath_tx_complete(sc, skb, tx_flags, txq); 2012 } 2013 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 2014 * accidentally reference it later. 2015 */ 2016 bf->bf_mpdu = NULL; 2017 2018 /* 2019 * Return the list of ath_buf of this mpdu to free queue 2020 */ 2021 spin_lock_irqsave(&sc->tx.txbuflock, flags); 2022 list_splice_tail_init(bf_q, &sc->tx.txbuf); 2023 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 2024 } 2025 2026 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 2027 struct ath_tx_status *ts, int nframes, int nbad, 2028 int txok) 2029 { 2030 struct sk_buff *skb = bf->bf_mpdu; 2031 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2032 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2033 struct ieee80211_hw *hw = sc->hw; 2034 struct ath_hw *ah = sc->sc_ah; 2035 u8 i, tx_rateindex; 2036 2037 if (txok) 2038 tx_info->status.ack_signal = ts->ts_rssi; 2039 2040 tx_rateindex = ts->ts_rateindex; 2041 WARN_ON(tx_rateindex >= hw->max_rates); 2042 2043 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 2044 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2045 2046 BUG_ON(nbad > nframes); 2047 } 2048 tx_info->status.ampdu_len = nframes; 2049 tx_info->status.ampdu_ack_len = nframes - nbad; 2050 2051 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2052 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { 2053 /* 2054 * If an underrun error is seen assume it as an excessive 2055 * retry only if max frame trigger level has been reached 2056 * (2 KB for single stream, and 4 KB for dual stream). 2057 * Adjust the long retry as if the frame was tried 2058 * hw->max_rate_tries times to affect how rate control updates 2059 * PER for the failed rate. 
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 *
		 * When the bus is congested, penalizing these underruns
		 * steers rate control toward slower rates, which helps the
		 * hardware actually get new frames out and in turn relieves
		 * the congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	spin_unlock_bh(&txq->axq_lock);

	if (!bf_isampdu(bf)) {
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	spin_lock_bh(&txq->axq_lock);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}
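/*
 * ath_tx_processq() drains completed frames from a legacy (non-EDMA) tx
 * queue. It walks txq->axq_q, asks the hardware via ath9k_hw_txprocdesc()
 * whether the last descriptor of each buffer chain has completed, and
 * hands finished buffers to ath_tx_process_buffer(). The most recently
 * completed descriptor is left on the queue, marked bf_stale, as the
 * "holding" descriptor the hardware may still re-read for its link
 * pointer; it is only reclaimed on a later pass.
 */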
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled after
		 * software writes TxE and before the hardware re-loads the
		 * last descriptor to pick up the newly chained one.
		 * Software must keep the last DONE descriptor as a holding
		 * descriptor - it does so by marking it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove the ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/
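/*
 * On EDMA hardware (ATH9K_HW_CAP_EDMA), tx completions are reported
 * through a dedicated, coherently mapped status ring rather than being
 * read back from the frame descriptors themselves; ath_txstatus_setup()
 * below allocates that ring and ath_tx_edma_init() points the hardware
 * at it via ath9k_hw_setup_statusring().
 */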
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
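/*
 * Per-node state: each of the WME_NUM_TID traffic identifiers is bound to
 * one of the WME_NUM_AC access categories through TID_TO_WME_AC(), and
 * each access category to the matching hardware tx queue in txq_map.
 * With the usual 802.1D user-priority mapping this would, for example,
 * place TIDs 0 and 3 in best effort and TIDs 6 and 7 in voice, but the
 * authoritative mapping is the TID_TO_WME_AC() macro itself.
 */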
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}