/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE               8
#define OFDM_PLCP_BITS              22
#define HT_RC_2_MCS(_rc)            ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)        ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                       8
#define L_LTF                       8
#define L_SIG                       4
#define HT_SIG                      8
#define HT_STF                      4
#define HT_LTF(_ns)                 (4 * (_ns))
#define SYMBOL_TIME(_ns)            ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)     (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME              16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
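 * (A BlockAckReq would ask the receiver to slide its reorder window
 * past the dropped sequence numbers instead of waiting for them.)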
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/*
	 * XXX: use ieee80211_find_sta!
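	 * The per-hw lookup is needed here because one ath_softc can host
	 * several virtual wiphys, so there is no single vif to search under.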
	 */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA issue
			 * happens. The chip needs to be reset, but the AP
			 * code may have synchronization issues when
			 * performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
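 *
 * Illustrative example: with an MPDU density of 4 us and a full-GI
 * 40 MHz first rate of MCS 12 (two streams of 16-QAM 3/4),
 * NUM_SYMBOLS_PER_USEC(4) = 1 symbol and nsymbits = 324 * 2 = 648,
 * giving minlen = 648 / 8 = 81 bytes; a shorter subframe then needs
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ extra delimiters.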
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
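		 * (Rate information is taken from the aggregate's first
		 * frame, while the length is that of the current subframe.)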
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frames always stay
			 * at the head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1 << qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
				  "HAL AC %u out of range, max %zu!\n",
				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	u16 skb_queue = skb_get_queue_mapping(skb);
	int qnum;

	qnum = ath_get_hal_qnum(skb_queue, sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
			  "TX queue: %d is full, depth: %d\n",
			  qnum, txq->axq_depth);
		ath_mac80211_stop_queue(sc, skb_queue);
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
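
	/*
	 * EDMA hardware keeps only a shallow per-queue TX FIFO; frames
	 * that did not fit are parked on txq_fifo_pending and must be
	 * completed (failed) here as well.
	 */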
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
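	 *
	 * The sequence number assigned here has to be the one tracked in
	 * tid->seq_next, since it is what ath_tx_addto_baw() indexes the
	 * block-ack window with.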
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/*
	 * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually
	 * exclusive.
	 */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	bf->bf_tx_aborted = false;

	return 0;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
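
	/*
	 * Frames are handed down as a one-element list; ath_tx_send_ampdu()
	 * below may divert the frame to the TID software queue instead of
	 * the hardware queue when aggregation applies.
	 */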
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,   /* segment length */
			    true,       /* first segment */
			    true,       /* last segment */
			    ds,         /* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
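		 * (The padding was inserted on the TX path to keep the
		 * payload 4-byte aligned after a header whose length is
		 * not a multiple of 4.)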
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else
		ieee80211_tx_status(hw, skb);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	ath_debug_stat_tx(sc, txq, bf, ts);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	spin_lock_bh(&txq->axq_lock);
	/*
	 * Only restart the queue once the buffer count has dropped
	 * well below the limit, so that it is not immediately stopped
	 * again.
	 */
	if (txq->stopped &&
	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
		if (qnum != -1) {
			ath_mac80211_start_queue(sc, qnum);
			txq->stopped = 0;
		}
	}
	spin_unlock_bh(&txq->axq_lock);
}
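/*
 * Reap completed frames from a legacy (pre-AR9003) hardware queue:
 * pop finished descriptors, run the nullfunc/power-save handshake,
 * dispatch to the aggregate or single-frame completion path and
 * kick the queue scheduler again.
 */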
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ts.ts_longretry;
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*
 * Periodic TX hang check: axq_tx_inprogress is set here and cleared
 * whenever a completion is processed; if it is still set on the next
 * poll while the queue is non-empty, assume the hardware hung and
 * reset the chip.
 */
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}
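/*
 * EDMA (AR9003) completion path. TX status is reported through a
 * dedicated status ring instead of the frame descriptors, so each
 * status entry is matched against the buffers at the tail of the
 * corresponding queue's TX FIFO.
 */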
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			bf->bf_retries = txs.ts_longretry;
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			/* Push the next pending frame set into the FIFO */
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

/* Allocate the DMA-coherent ring that receives EDMA TX status entries */
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}
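/*
 * Set up the per-station (ath_node) TX state: one TID structure for
 * every WME TID, each mapped to its access category and thus to a
 * hardware queue number.
 */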
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}

/*
 * Tear down per-station TX state: unschedule this node's access
 * categories and TIDs and drain any frames still queued on them.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock_bh(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock_bh(&txq->axq_lock);
		}
	}
}