1 /* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/dma-mapping.h> 18 #include "ath9k.h" 19 #include "ar9003_mac.h" 20 21 #define BITS_PER_BYTE 8 22 #define OFDM_PLCP_BITS 22 23 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 24 #define L_STF 8 25 #define L_LTF 8 26 #define L_SIG 4 27 #define HT_SIG 8 28 #define HT_STF 4 29 #define HT_LTF(_ns) (4 * (_ns)) 30 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ 31 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ 32 #define TIME_SYMBOLS(t) ((t) >> 2) 33 #define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18) 34 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) 35 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) 36 37 38 static u16 bits_per_symbol[][2] = { 39 /* 20MHz 40MHz */ 40 { 26, 54 }, /* 0: BPSK */ 41 { 52, 108 }, /* 1: QPSK 1/2 */ 42 { 78, 162 }, /* 2: QPSK 3/4 */ 43 { 104, 216 }, /* 3: 16-QAM 1/2 */ 44 { 156, 324 }, /* 4: 16-QAM 3/4 */ 45 { 208, 432 }, /* 5: 64-QAM 2/3 */ 46 { 234, 486 }, /* 6: 64-QAM 3/4 */ 47 { 260, 540 }, /* 7: 64-QAM 5/6 */ 48 }; 49 50 #define IS_HT_RATE(_rate) ((_rate) & 0x80) 51 52 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 53 struct ath_atx_tid *tid, struct sk_buff *skb); 54 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 55 int tx_flags, struct ath_txq *txq); 56 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 57 struct ath_txq *txq, struct list_head *bf_q, 58 struct ath_tx_status *ts, int txok); 59 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 60 struct list_head *head, bool internal); 61 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 62 struct ath_tx_status *ts, int nframes, int nbad, 63 int txok); 64 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 65 int seqno); 66 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 67 struct ath_txq *txq, 68 struct ath_atx_tid *tid, 69 struct sk_buff *skb); 70 71 enum { 72 MCS_HT20, 73 MCS_HT20_SGI, 74 MCS_HT40, 75 MCS_HT40_SGI, 76 }; 77 78 /*********************/ 79 /* Aggregation logic */ 80 /*********************/ 81 82 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq) 83 __acquires(&txq->axq_lock) 84 { 85 spin_lock_bh(&txq->axq_lock); 86 } 87 88 void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq) 89 __releases(&txq->axq_lock) 90 { 91 spin_unlock_bh(&txq->axq_lock); 92 } 93 94 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) 95 __releases(&txq->axq_lock) 96 { 97 struct sk_buff_head q; 98 struct sk_buff *skb; 99 100 __skb_queue_head_init(&q); 101 skb_queue_splice_init(&txq->complete_q, &q); 102 spin_unlock_bh(&txq->axq_lock); 103 104 while ((skb 
= __skb_dequeue(&q))) 105 ieee80211_tx_status(sc->hw, skb); 106 } 107 108 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) 109 { 110 struct ath_atx_ac *ac = tid->ac; 111 112 if (tid->paused) 113 return; 114 115 if (tid->sched) 116 return; 117 118 tid->sched = true; 119 list_add_tail(&tid->list, &ac->tid_q); 120 121 if (ac->sched) 122 return; 123 124 ac->sched = true; 125 list_add_tail(&ac->list, &txq->axq_acq); 126 } 127 128 static struct ath_frame_info *get_frame_info(struct sk_buff *skb) 129 { 130 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 131 BUILD_BUG_ON(sizeof(struct ath_frame_info) > 132 sizeof(tx_info->rate_driver_data)); 133 return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; 134 } 135 136 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) 137 { 138 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno, 139 seqno << IEEE80211_SEQ_SEQ_SHIFT); 140 } 141 142 static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, 143 struct ath_buf *bf) 144 { 145 ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates, 146 ARRAY_SIZE(bf->rates)); 147 } 148 149 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 150 { 151 struct ath_txq *txq = tid->ac->txq; 152 struct sk_buff *skb; 153 struct ath_buf *bf; 154 struct list_head bf_head; 155 struct ath_tx_status ts; 156 struct ath_frame_info *fi; 157 bool sendbar = false; 158 159 INIT_LIST_HEAD(&bf_head); 160 161 memset(&ts, 0, sizeof(ts)); 162 163 while ((skb = __skb_dequeue(&tid->buf_q))) { 164 fi = get_frame_info(skb); 165 bf = fi->bf; 166 167 if (!bf) { 168 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 169 if (!bf) { 170 ieee80211_free_txskb(sc->hw, skb); 171 continue; 172 } 173 } 174 175 if (fi->retries) { 176 list_add_tail(&bf->list, &bf_head); 177 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 178 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 179 sendbar = true; 180 } else { 181 ath_set_rates(tid->an->vif, tid->an->sta, bf); 182 ath_tx_send_normal(sc, txq, NULL, skb); 183 } 184 } 185 186 if (sendbar) { 187 ath_txq_unlock(sc, txq); 188 ath_send_bar(tid, tid->seq_start); 189 ath_txq_lock(sc, txq); 190 } 191 } 192 193 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 194 int seqno) 195 { 196 int index, cindex; 197 198 index = ATH_BA_INDEX(tid->seq_start, seqno); 199 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 200 201 __clear_bit(cindex, tid->tx_buf); 202 203 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 204 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 205 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 206 if (tid->bar_index >= 0) 207 tid->bar_index--; 208 } 209 } 210 211 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 212 u16 seqno) 213 { 214 int index, cindex; 215 216 index = ATH_BA_INDEX(tid->seq_start, seqno); 217 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 218 __set_bit(cindex, tid->tx_buf); 219 220 if (index >= ((tid->baw_tail - tid->baw_head) & 221 (ATH_TID_MAX_BUFS - 1))) { 222 tid->baw_tail = cindex; 223 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 224 } 225 } 226 227 /* 228 * TODO: For frame(s) that are in the retry state, we will reuse the 229 * sequence number(s) without setting the retry bit. The 230 * alternative is to give up on these and BAR the receiver's window 231 * forward. 
232 */ 233 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, 234 struct ath_atx_tid *tid) 235 236 { 237 struct sk_buff *skb; 238 struct ath_buf *bf; 239 struct list_head bf_head; 240 struct ath_tx_status ts; 241 struct ath_frame_info *fi; 242 243 memset(&ts, 0, sizeof(ts)); 244 INIT_LIST_HEAD(&bf_head); 245 246 while ((skb = __skb_dequeue(&tid->buf_q))) { 247 fi = get_frame_info(skb); 248 bf = fi->bf; 249 250 if (!bf) { 251 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); 252 continue; 253 } 254 255 list_add_tail(&bf->list, &bf_head); 256 257 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 258 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 259 } 260 261 tid->seq_next = tid->seq_start; 262 tid->baw_tail = tid->baw_head; 263 tid->bar_index = -1; 264 } 265 266 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 267 struct sk_buff *skb, int count) 268 { 269 struct ath_frame_info *fi = get_frame_info(skb); 270 struct ath_buf *bf = fi->bf; 271 struct ieee80211_hdr *hdr; 272 int prev = fi->retries; 273 274 TX_STAT_INC(txq->axq_qnum, a_retries); 275 fi->retries += count; 276 277 if (prev > 0) 278 return; 279 280 hdr = (struct ieee80211_hdr *)skb->data; 281 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 282 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 283 sizeof(*hdr), DMA_TO_DEVICE); 284 } 285 286 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 287 { 288 struct ath_buf *bf = NULL; 289 290 spin_lock_bh(&sc->tx.txbuflock); 291 292 if (unlikely(list_empty(&sc->tx.txbuf))) { 293 spin_unlock_bh(&sc->tx.txbuflock); 294 return NULL; 295 } 296 297 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 298 list_del(&bf->list); 299 300 spin_unlock_bh(&sc->tx.txbuflock); 301 302 return bf; 303 } 304 305 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf) 306 { 307 spin_lock_bh(&sc->tx.txbuflock); 308 list_add_tail(&bf->list, &sc->tx.txbuf); 309 spin_unlock_bh(&sc->tx.txbuflock); 310 } 311 312 static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 313 { 314 struct ath_buf *tbf; 315 316 tbf = ath_tx_get_buffer(sc); 317 if (WARN_ON(!tbf)) 318 return NULL; 319 320 ATH_TXBUF_RESET(tbf); 321 322 tbf->bf_mpdu = bf->bf_mpdu; 323 tbf->bf_buf_addr = bf->bf_buf_addr; 324 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 325 tbf->bf_state = bf->bf_state; 326 327 return tbf; 328 } 329 330 static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, 331 struct ath_tx_status *ts, int txok, 332 int *nframes, int *nbad) 333 { 334 struct ath_frame_info *fi; 335 u16 seq_st = 0; 336 u32 ba[WME_BA_BMP_SIZE >> 5]; 337 int ba_index; 338 int isaggr = 0; 339 340 *nbad = 0; 341 *nframes = 0; 342 343 isaggr = bf_isaggr(bf); 344 if (isaggr) { 345 seq_st = ts->ts_seqnum; 346 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); 347 } 348 349 while (bf) { 350 fi = get_frame_info(bf->bf_mpdu); 351 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); 352 353 (*nframes)++; 354 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 355 (*nbad)++; 356 357 bf = bf->bf_next; 358 } 359 } 360 361 362 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 363 struct ath_buf *bf, struct list_head *bf_q, 364 struct ath_tx_status *ts, int txok) 365 { 366 struct ath_node *an = NULL; 367 struct sk_buff *skb; 368 struct ieee80211_sta *sta; 369 struct ieee80211_hw *hw = sc->hw; 370 struct ieee80211_hdr *hdr; 371 struct ieee80211_tx_info *tx_info; 372 struct ath_atx_tid *tid = NULL; 373 
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true, isba;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, bf->rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;
	isba = ts->ts_flags & ATH9K_TX_BA;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 *
	 * Only BlockAcks have a TID and therefore normal Acks cannot be
	 * checked
	 */
	if (isba && tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			/*
			 * Outside of the current BlockAck window,
			 * maybe part of a previous session
			 */
			txfail = 1;
		} else if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
					  ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
500 */ 501 INIT_LIST_HEAD(&bf_head); 502 if (bf_next != NULL || !bf_last->bf_stale) 503 list_move_tail(&bf->list, &bf_head); 504 505 if (!txpending) { 506 /* 507 * complete the acked-ones/xretried ones; update 508 * block-ack window 509 */ 510 ath_tx_update_baw(sc, tid, seqno); 511 512 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 513 memcpy(tx_info->control.rates, rates, sizeof(rates)); 514 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); 515 rc_update = false; 516 } 517 518 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 519 !txfail); 520 } else { 521 if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) { 522 tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP; 523 ieee80211_sta_eosp(sta); 524 } 525 /* retry the un-acked ones */ 526 if (bf->bf_next == NULL && bf_last->bf_stale) { 527 struct ath_buf *tbf; 528 529 tbf = ath_clone_txbuf(sc, bf_last); 530 /* 531 * Update tx baw and complete the 532 * frame with failed status if we 533 * run out of tx buf. 534 */ 535 if (!tbf) { 536 ath_tx_update_baw(sc, tid, seqno); 537 538 ath_tx_complete_buf(sc, bf, txq, 539 &bf_head, ts, 0); 540 bar_index = max_t(int, bar_index, 541 ATH_BA_INDEX(seq_first, seqno)); 542 break; 543 } 544 545 fi->bf = tbf; 546 } 547 548 /* 549 * Put this buffer to the temporary pending 550 * queue to retain ordering 551 */ 552 __skb_queue_tail(&bf_pending, skb); 553 } 554 555 bf = bf_next; 556 } 557 558 /* prepend un-acked frames to the beginning of the pending frame queue */ 559 if (!skb_queue_empty(&bf_pending)) { 560 if (an->sleeping) 561 ieee80211_sta_set_buffered(sta, tid->tidno, true); 562 563 skb_queue_splice(&bf_pending, &tid->buf_q); 564 if (!an->sleeping) { 565 ath_tx_queue_tid(txq, tid); 566 567 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) 568 tid->ac->clear_ps_filter = true; 569 } 570 } 571 572 if (bar_index >= 0) { 573 u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index); 574 575 if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq)) 576 tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq); 577 578 ath_txq_unlock(sc, txq); 579 ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1)); 580 ath_txq_lock(sc, txq); 581 } 582 583 rcu_read_unlock(); 584 585 if (needreset) 586 ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR); 587 } 588 589 static bool bf_is_ampdu_not_probing(struct ath_buf *bf) 590 { 591 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu); 592 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 593 } 594 595 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, 596 struct ath_tx_status *ts, struct ath_buf *bf, 597 struct list_head *bf_head) 598 { 599 struct ieee80211_tx_info *info; 600 bool txok, flush; 601 602 txok = !(ts->ts_status & ATH9K_TXERR_MASK); 603 flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 604 txq->axq_tx_inprogress = false; 605 606 txq->axq_depth--; 607 if (bf_is_ampdu_not_probing(bf)) 608 txq->axq_ampdu_depth--; 609 610 if (!bf_isampdu(bf)) { 611 if (!flush) { 612 info = IEEE80211_SKB_CB(bf->bf_mpdu); 613 memcpy(info->control.rates, bf->rates, 614 sizeof(info->control.rates)); 615 ath_tx_rc_status(sc, bf, ts, 1, txok ? 
0 : 1, txok); 616 } 617 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); 618 } else 619 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); 620 621 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush) 622 ath_txq_schedule(sc, txq); 623 } 624 625 static bool ath_lookup_legacy(struct ath_buf *bf) 626 { 627 struct sk_buff *skb; 628 struct ieee80211_tx_info *tx_info; 629 struct ieee80211_tx_rate *rates; 630 int i; 631 632 skb = bf->bf_mpdu; 633 tx_info = IEEE80211_SKB_CB(skb); 634 rates = tx_info->control.rates; 635 636 for (i = 0; i < 4; i++) { 637 if (!rates[i].count || rates[i].idx < 0) 638 break; 639 640 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) 641 return true; 642 } 643 644 return false; 645 } 646 647 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 648 struct ath_atx_tid *tid) 649 { 650 struct sk_buff *skb; 651 struct ieee80211_tx_info *tx_info; 652 struct ieee80211_tx_rate *rates; 653 u32 max_4ms_framelen, frmlen; 654 u16 aggr_limit, bt_aggr_limit, legacy = 0; 655 int q = tid->ac->txq->mac80211_qnum; 656 int i; 657 658 skb = bf->bf_mpdu; 659 tx_info = IEEE80211_SKB_CB(skb); 660 rates = bf->rates; 661 662 /* 663 * Find the lowest frame length among the rate series that will have a 664 * 4ms (or TXOP limited) transmit duration. 665 */ 666 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; 667 668 for (i = 0; i < 4; i++) { 669 int modeidx; 670 671 if (!rates[i].count) 672 continue; 673 674 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { 675 legacy = 1; 676 break; 677 } 678 679 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 680 modeidx = MCS_HT40; 681 else 682 modeidx = MCS_HT20; 683 684 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 685 modeidx++; 686 687 frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; 688 max_4ms_framelen = min(max_4ms_framelen, frmlen); 689 } 690 691 /* 692 * limit aggregate size by the minimum rate if rate selected is 693 * not a probe rate, if rate selected is a probe rate then 694 * avoid aggregation of this packet. 695 */ 696 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) 697 return 0; 698 699 aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX); 700 701 /* 702 * Override the default aggregation limit for BTCOEX. 703 */ 704 bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen); 705 if (bt_aggr_limit) 706 aggr_limit = bt_aggr_limit; 707 708 /* 709 * h/w can accept aggregates up to 16 bit lengths (65535). 710 * The IE, however can hold up to 65536, which shows up here 711 * as zero. Ignore 65536 since we are constrained by hw. 712 */ 713 if (tid->an->maxampdu) 714 aggr_limit = min(aggr_limit, tid->an->maxampdu); 715 716 return aggr_limit; 717 } 718 719 /* 720 * Returns the number of delimiters to be added to 721 * meet the minimum required mpdudensity. 722 */ 723 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, 724 struct ath_buf *bf, u16 frmlen, 725 bool first_subfrm) 726 { 727 #define FIRST_DESC_NDELIMS 60 728 u32 nsymbits, nsymbols; 729 u16 minlen; 730 u8 flags, rix; 731 int width, streams, half_gi, ndelim, mindelim; 732 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 733 734 /* Select standard number of delimiters based on frame length alone */ 735 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 736 737 /* 738 * If encryption enabled, hardware requires some more padding between 739 * subframes. 740 * TODO - this could be improved to be dependent on the rate. 
	 * The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = bf->rates[0].idx;
	flags = bf->rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
			struct ath_atx_tid *tid)
{
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	struct ath_buf *bf;
	u16 seqno;

	while (1) {
		skb = skb_peek(&tid->buf_q);
		if (!skb)
			break;

		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf) {
			__skb_unlink(skb, &tid->buf_q);
			ieee80211_free_txskb(sc->hw, skb);
			continue;
		}

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
			break;

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		bf->bf_next = NULL;
		bf->bf_lastbf = bf;
		return bf;
	}

	return NULL;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;

	do {
		bf = ath_tx_get_tid_subframe(sc, txq, tid);
		if (!bf) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		skb = bf->bf_mpdu;
		fi =
get_frame_info(skb); 870 871 if (!bf_first) 872 bf_first = bf; 873 874 if (!rl) { 875 ath_set_rates(tid->an->vif, tid->an->sta, bf); 876 aggr_limit = ath_lookup_rate(sc, bf, tid); 877 rl = 1; 878 } 879 880 /* do not exceed aggregation limit */ 881 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; 882 883 if (nframes && 884 ((aggr_limit < (al + bpad + al_delta + prev_al)) || 885 ath_lookup_legacy(bf))) { 886 status = ATH_AGGR_LIMITED; 887 break; 888 } 889 890 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 891 if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 892 break; 893 894 /* do not exceed subframe limit */ 895 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { 896 status = ATH_AGGR_LIMITED; 897 break; 898 } 899 900 /* add padding for previous frame to aggregation length */ 901 al += bpad + al_delta; 902 903 /* 904 * Get the delimiters needed to meet the MPDU 905 * density for this node. 906 */ 907 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, 908 !nframes); 909 bpad = PADBYTES(al_delta) + (ndelim << 2); 910 911 nframes++; 912 bf->bf_next = NULL; 913 914 /* link buffers of this frame to the aggregate */ 915 if (!fi->retries) 916 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 917 bf->bf_state.ndelim = ndelim; 918 919 __skb_unlink(skb, &tid->buf_q); 920 list_add_tail(&bf->list, bf_q); 921 if (bf_prev) 922 bf_prev->bf_next = bf; 923 924 bf_prev = bf; 925 926 } while (!skb_queue_empty(&tid->buf_q)); 927 928 *aggr_len = al; 929 930 return status; 931 #undef PADBYTES 932 } 933 934 /* 935 * rix - rate index 936 * pktlen - total bytes (delims + data + fcs + pads + pad delims) 937 * width - 0 for 20 MHz, 1 for 40 MHz 938 * half_gi - to use 4us v/s 3.6 us for symbol time 939 */ 940 static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 941 int width, int half_gi, bool shortPreamble) 942 { 943 u32 nbits, nsymbits, duration, nsymbols; 944 int streams; 945 946 /* find number of symbols: PLCP + data */ 947 streams = HT_RC_2_STREAMS(rix); 948 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 949 nsymbits = bits_per_symbol[rix % 8][width] * streams; 950 nsymbols = (nbits + nsymbits - 1) / nsymbits; 951 952 if (!half_gi) 953 duration = SYMBOL_TIME(nsymbols); 954 else 955 duration = SYMBOL_TIME_HALFGI(nsymbols); 956 957 /* addup duration for legacy/ht training and signal fields */ 958 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 959 960 return duration; 961 } 962 963 static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi) 964 { 965 int streams = HT_RC_2_STREAMS(mcs); 966 int symbols, bits; 967 int bytes = 0; 968 969 symbols = sgi ? 
TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec); 970 bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams; 971 bits -= OFDM_PLCP_BITS; 972 bytes = bits / 8; 973 bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 974 if (bytes > 65532) 975 bytes = 65532; 976 977 return bytes; 978 } 979 980 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop) 981 { 982 u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi; 983 int mcs; 984 985 /* 4ms is the default (and maximum) duration */ 986 if (!txop || txop > 4096) 987 txop = 4096; 988 989 cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20]; 990 cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI]; 991 cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40]; 992 cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI]; 993 for (mcs = 0; mcs < 32; mcs++) { 994 cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false); 995 cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true); 996 cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false); 997 cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true); 998 } 999 } 1000 1001 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 1002 struct ath_tx_info *info, int len) 1003 { 1004 struct ath_hw *ah = sc->sc_ah; 1005 struct sk_buff *skb; 1006 struct ieee80211_tx_info *tx_info; 1007 struct ieee80211_tx_rate *rates; 1008 const struct ieee80211_rate *rate; 1009 struct ieee80211_hdr *hdr; 1010 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 1011 int i; 1012 u8 rix = 0; 1013 1014 skb = bf->bf_mpdu; 1015 tx_info = IEEE80211_SKB_CB(skb); 1016 rates = bf->rates; 1017 hdr = (struct ieee80211_hdr *)skb->data; 1018 1019 /* set dur_update_en for l-sig computation except for PS-Poll frames */ 1020 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); 1021 info->rtscts_rate = fi->rtscts_rate; 1022 1023 for (i = 0; i < ARRAY_SIZE(bf->rates); i++) { 1024 bool is_40, is_sgi, is_sp; 1025 int phy; 1026 1027 if (!rates[i].count || (rates[i].idx < 0)) 1028 continue; 1029 1030 rix = rates[i].idx; 1031 info->rates[i].Tries = rates[i].count; 1032 1033 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 1034 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1035 info->flags |= ATH9K_TXDESC_RTSENA; 1036 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1037 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1038 info->flags |= ATH9K_TXDESC_CTSENA; 1039 } 1040 1041 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 1042 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; 1043 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 1044 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; 1045 1046 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); 1047 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); 1048 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); 1049 1050 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1051 /* MCS rates */ 1052 info->rates[i].Rate = rix | 0x80; 1053 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1054 ah->txchainmask, info->rates[i].Rate); 1055 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, 1056 is_40, is_sgi, is_sp); 1057 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1058 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; 1059 continue; 1060 } 1061 1062 /* legacy rates */ 1063 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; 1064 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1065 !(rate->flags & IEEE80211_RATE_ERP_G)) 1066 phy = 
WLAN_RC_PHY_CCK; 1067 else 1068 phy = WLAN_RC_PHY_OFDM; 1069 1070 info->rates[i].Rate = rate->hw_value; 1071 if (rate->hw_value_short) { 1072 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1073 info->rates[i].Rate |= rate->hw_value_short; 1074 } else { 1075 is_sp = false; 1076 } 1077 1078 if (bf->bf_state.bfs_paprd) 1079 info->rates[i].ChSel = ah->txchainmask; 1080 else 1081 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1082 ah->txchainmask, info->rates[i].Rate); 1083 1084 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1085 phy, rate->bitrate * 100, len, rix, is_sp); 1086 } 1087 1088 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1089 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) 1090 info->flags &= ~ATH9K_TXDESC_RTSENA; 1091 1092 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1093 if (info->flags & ATH9K_TXDESC_RTSENA) 1094 info->flags &= ~ATH9K_TXDESC_CTSENA; 1095 } 1096 1097 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1098 { 1099 struct ieee80211_hdr *hdr; 1100 enum ath9k_pkt_type htype; 1101 __le16 fc; 1102 1103 hdr = (struct ieee80211_hdr *)skb->data; 1104 fc = hdr->frame_control; 1105 1106 if (ieee80211_is_beacon(fc)) 1107 htype = ATH9K_PKT_TYPE_BEACON; 1108 else if (ieee80211_is_probe_resp(fc)) 1109 htype = ATH9K_PKT_TYPE_PROBE_RESP; 1110 else if (ieee80211_is_atim(fc)) 1111 htype = ATH9K_PKT_TYPE_ATIM; 1112 else if (ieee80211_is_pspoll(fc)) 1113 htype = ATH9K_PKT_TYPE_PSPOLL; 1114 else 1115 htype = ATH9K_PKT_TYPE_NORMAL; 1116 1117 return htype; 1118 } 1119 1120 static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, 1121 struct ath_txq *txq, int len) 1122 { 1123 struct ath_hw *ah = sc->sc_ah; 1124 struct ath_buf *bf_first = NULL; 1125 struct ath_tx_info info; 1126 1127 memset(&info, 0, sizeof(info)); 1128 info.is_first = true; 1129 info.is_last = true; 1130 info.txpower = MAX_RATE_POWER; 1131 info.qcu = txq->axq_qnum; 1132 1133 while (bf) { 1134 struct sk_buff *skb = bf->bf_mpdu; 1135 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1136 struct ath_frame_info *fi = get_frame_info(skb); 1137 bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); 1138 1139 info.type = get_hw_packet_type(skb); 1140 if (bf->bf_next) 1141 info.link = bf->bf_next->bf_daddr; 1142 else 1143 info.link = 0; 1144 1145 if (!bf_first) { 1146 bf_first = bf; 1147 1148 info.flags = ATH9K_TXDESC_INTREQ; 1149 if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) || 1150 txq == sc->tx.uapsdq) 1151 info.flags |= ATH9K_TXDESC_CLRDMASK; 1152 1153 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1154 info.flags |= ATH9K_TXDESC_NOACK; 1155 if (tx_info->flags & IEEE80211_TX_CTL_LDPC) 1156 info.flags |= ATH9K_TXDESC_LDPC; 1157 1158 if (bf->bf_state.bfs_paprd) 1159 info.flags |= (u32) bf->bf_state.bfs_paprd << 1160 ATH9K_TXDESC_PAPRD_S; 1161 1162 ath_buf_set_rate(sc, bf, &info, len); 1163 } 1164 1165 info.buf_addr[0] = bf->bf_buf_addr; 1166 info.buf_len[0] = skb->len; 1167 info.pkt_len = fi->framelen; 1168 info.keyix = fi->keyix; 1169 info.keytype = fi->keytype; 1170 1171 if (aggr) { 1172 if (bf == bf_first) 1173 info.aggr = AGGR_BUF_FIRST; 1174 else if (bf == bf_first->bf_lastbf) 1175 info.aggr = AGGR_BUF_LAST; 1176 else 1177 info.aggr = AGGR_BUF_MIDDLE; 1178 1179 info.ndelim = bf->bf_state.ndelim; 1180 info.aggr_len = len; 1181 } 1182 1183 if (bf == bf_first->bf_lastbf) 1184 bf_first = NULL; 1185 1186 ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); 1187 bf = bf->bf_next; 1188 } 1189 } 1190 
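
/*
 * Illustrative note (added for clarity; not part of the original driver
 * source): a worked example of the HT duration math used by
 * ath_pkt_duration() and ath_buf_set_rate() above.  For a 1500 byte MPDU
 * at MCS 7, 20 MHz, long GI, one spatial stream:
 *
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS          = 12022
 *   nsymbits = bits_per_symbol[7][0] * 1 stream   = 260
 *   nsymbols = ceil(12022 / 260)                  = 47
 *   data     = SYMBOL_TIME(47)                    = 188 us (4 us/symbol)
 *   preamble = L_STF + L_LTF + L_SIG + HT_SIG +
 *              HT_STF + HT_LTF(1)                 = 36 us
 *   total    = 224 us
 *
 * ath_max_framelen() works in the opposite direction: given a TXOP budget
 * in microseconds it estimates the largest frame (capped at 65532 bytes)
 * that still fits, and ath_update_max_aggr_framelen() caches the result
 * per queue/MCS/bandwidth/GI for ath_lookup_rate() to consult.
 */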
1191 static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, 1192 struct ath_atx_tid *tid) 1193 { 1194 struct ath_buf *bf; 1195 enum ATH_AGGR_STATUS status; 1196 struct ieee80211_tx_info *tx_info; 1197 struct list_head bf_q; 1198 int aggr_len; 1199 1200 do { 1201 if (skb_queue_empty(&tid->buf_q)) 1202 return; 1203 1204 INIT_LIST_HEAD(&bf_q); 1205 1206 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len); 1207 1208 /* 1209 * no frames picked up to be aggregated; 1210 * block-ack window is not open. 1211 */ 1212 if (list_empty(&bf_q)) 1213 break; 1214 1215 bf = list_first_entry(&bf_q, struct ath_buf, list); 1216 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 1217 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); 1218 1219 if (tid->ac->clear_ps_filter) { 1220 tid->ac->clear_ps_filter = false; 1221 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1222 } else { 1223 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 1224 } 1225 1226 /* if only one frame, send as non-aggregate */ 1227 if (bf == bf->bf_lastbf) { 1228 aggr_len = get_frame_info(bf->bf_mpdu)->framelen; 1229 bf->bf_state.bf_type = BUF_AMPDU; 1230 } else { 1231 TX_STAT_INC(txq->axq_qnum, a_aggr); 1232 } 1233 1234 ath_tx_fill_desc(sc, bf, txq, aggr_len); 1235 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1236 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && 1237 status != ATH_AGGR_BAW_CLOSED); 1238 } 1239 1240 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 1241 u16 tid, u16 *ssn) 1242 { 1243 struct ath_atx_tid *txtid; 1244 struct ath_node *an; 1245 u8 density; 1246 1247 an = (struct ath_node *)sta->drv_priv; 1248 txtid = ATH_AN_2_TID(an, tid); 1249 1250 /* update ampdu factor/density, they may have changed. This may happen 1251 * in HT IBSS when a beacon with HT-info is received after the station 1252 * has already been added. 
1253 */ 1254 if (sta->ht_cap.ht_supported) { 1255 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 1256 sta->ht_cap.ampdu_factor); 1257 density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); 1258 an->mpdudensity = density; 1259 } 1260 1261 txtid->active = true; 1262 txtid->paused = true; 1263 *ssn = txtid->seq_start = txtid->seq_next; 1264 txtid->bar_index = -1; 1265 1266 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1267 txtid->baw_head = txtid->baw_tail = 0; 1268 1269 return 0; 1270 } 1271 1272 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1273 { 1274 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1275 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1276 struct ath_txq *txq = txtid->ac->txq; 1277 1278 ath_txq_lock(sc, txq); 1279 txtid->active = false; 1280 txtid->paused = true; 1281 ath_tx_flush_tid(sc, txtid); 1282 ath_txq_unlock_complete(sc, txq); 1283 } 1284 1285 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1286 struct ath_node *an) 1287 { 1288 struct ath_atx_tid *tid; 1289 struct ath_atx_ac *ac; 1290 struct ath_txq *txq; 1291 bool buffered; 1292 int tidno; 1293 1294 for (tidno = 0, tid = &an->tid[tidno]; 1295 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1296 1297 if (!tid->sched) 1298 continue; 1299 1300 ac = tid->ac; 1301 txq = ac->txq; 1302 1303 ath_txq_lock(sc, txq); 1304 1305 buffered = !skb_queue_empty(&tid->buf_q); 1306 1307 tid->sched = false; 1308 list_del(&tid->list); 1309 1310 if (ac->sched) { 1311 ac->sched = false; 1312 list_del(&ac->list); 1313 } 1314 1315 ath_txq_unlock(sc, txq); 1316 1317 ieee80211_sta_set_buffered(sta, tidno, buffered); 1318 } 1319 } 1320 1321 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) 1322 { 1323 struct ath_atx_tid *tid; 1324 struct ath_atx_ac *ac; 1325 struct ath_txq *txq; 1326 int tidno; 1327 1328 for (tidno = 0, tid = &an->tid[tidno]; 1329 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1330 1331 ac = tid->ac; 1332 txq = ac->txq; 1333 1334 ath_txq_lock(sc, txq); 1335 ac->clear_ps_filter = true; 1336 1337 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { 1338 ath_tx_queue_tid(txq, tid); 1339 ath_txq_schedule(sc, txq); 1340 } 1341 1342 ath_txq_unlock_complete(sc, txq); 1343 } 1344 } 1345 1346 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, 1347 u16 tidno) 1348 { 1349 struct ath_atx_tid *tid; 1350 struct ath_node *an; 1351 struct ath_txq *txq; 1352 1353 an = (struct ath_node *)sta->drv_priv; 1354 tid = ATH_AN_2_TID(an, tidno); 1355 txq = tid->ac->txq; 1356 1357 ath_txq_lock(sc, txq); 1358 1359 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1360 tid->paused = false; 1361 1362 if (!skb_queue_empty(&tid->buf_q)) { 1363 ath_tx_queue_tid(txq, tid); 1364 ath_txq_schedule(sc, txq); 1365 } 1366 1367 ath_txq_unlock_complete(sc, txq); 1368 } 1369 1370 void ath9k_release_buffered_frames(struct ieee80211_hw *hw, 1371 struct ieee80211_sta *sta, 1372 u16 tids, int nframes, 1373 enum ieee80211_frame_release_type reason, 1374 bool more_data) 1375 { 1376 struct ath_softc *sc = hw->priv; 1377 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1378 struct ath_txq *txq = sc->tx.uapsdq; 1379 struct ieee80211_tx_info *info; 1380 struct list_head bf_q; 1381 struct ath_buf *bf_tail = NULL, *bf; 1382 int sent = 0; 1383 int i; 1384 1385 INIT_LIST_HEAD(&bf_q); 1386 for (i = 0; tids && nframes; i++, tids >>= 1) { 1387 struct ath_atx_tid *tid; 1388 1389 if (!(tids & 1)) 1390 continue; 1391 1392 tid = ATH_AN_2_TID(an, 
i); 1393 if (tid->paused) 1394 continue; 1395 1396 ath_txq_lock(sc, tid->ac->txq); 1397 while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { 1398 bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); 1399 if (!bf) 1400 break; 1401 1402 __skb_unlink(bf->bf_mpdu, &tid->buf_q); 1403 list_add_tail(&bf->list, &bf_q); 1404 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1405 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1406 bf->bf_state.bf_type &= ~BUF_AGGR; 1407 if (bf_tail) 1408 bf_tail->bf_next = bf; 1409 1410 bf_tail = bf; 1411 nframes--; 1412 sent++; 1413 TX_STAT_INC(txq->axq_qnum, a_queued_hw); 1414 1415 if (skb_queue_empty(&tid->buf_q)) 1416 ieee80211_sta_set_buffered(an->sta, i, false); 1417 } 1418 ath_txq_unlock_complete(sc, tid->ac->txq); 1419 } 1420 1421 if (list_empty(&bf_q)) 1422 return; 1423 1424 info = IEEE80211_SKB_CB(bf_tail->bf_mpdu); 1425 info->flags |= IEEE80211_TX_STATUS_EOSP; 1426 1427 bf = list_first_entry(&bf_q, struct ath_buf, list); 1428 ath_txq_lock(sc, txq); 1429 ath_tx_fill_desc(sc, bf, txq, 0); 1430 ath_tx_txqaddbuf(sc, txq, &bf_q, false); 1431 ath_txq_unlock(sc, txq); 1432 } 1433 1434 /********************/ 1435 /* Queue Management */ 1436 /********************/ 1437 1438 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 1439 { 1440 struct ath_hw *ah = sc->sc_ah; 1441 struct ath9k_tx_queue_info qi; 1442 static const int subtype_txq_to_hwq[] = { 1443 [IEEE80211_AC_BE] = ATH_TXQ_AC_BE, 1444 [IEEE80211_AC_BK] = ATH_TXQ_AC_BK, 1445 [IEEE80211_AC_VI] = ATH_TXQ_AC_VI, 1446 [IEEE80211_AC_VO] = ATH_TXQ_AC_VO, 1447 }; 1448 int axq_qnum, i; 1449 1450 memset(&qi, 0, sizeof(qi)); 1451 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 1452 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 1453 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 1454 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 1455 qi.tqi_physCompBuf = 0; 1456 1457 /* 1458 * Enable interrupts only for EOL and DESC conditions. 1459 * We mark tx descriptors to receive a DESC interrupt 1460 * when a tx queue gets deep; otherwise waiting for the 1461 * EOL to reap descriptors. Note that this is done to 1462 * reduce interrupt load and this only defers reaping 1463 * descriptors, never transmitting frames. Aside from 1464 * reducing interrupts this also permits more concurrency. 1465 * The only potential downside is if the tx queue backs 1466 * up in which case the top half of the kernel may backup 1467 * due to a lack of tx descriptors. 1468 * 1469 * The UAPSD queue is an exception, since we take a desc- 1470 * based intr on the EOSP frames. 
1471 */ 1472 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1473 qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE; 1474 } else { 1475 if (qtype == ATH9K_TX_QUEUE_UAPSD) 1476 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 1477 else 1478 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 1479 TXQ_FLAG_TXDESCINT_ENABLE; 1480 } 1481 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 1482 if (axq_qnum == -1) { 1483 /* 1484 * NB: don't print a message, this happens 1485 * normally on parts with too few tx queues 1486 */ 1487 return NULL; 1488 } 1489 if (!ATH_TXQ_SETUP(sc, axq_qnum)) { 1490 struct ath_txq *txq = &sc->tx.txq[axq_qnum]; 1491 1492 txq->axq_qnum = axq_qnum; 1493 txq->mac80211_qnum = -1; 1494 txq->axq_link = NULL; 1495 __skb_queue_head_init(&txq->complete_q); 1496 INIT_LIST_HEAD(&txq->axq_q); 1497 INIT_LIST_HEAD(&txq->axq_acq); 1498 spin_lock_init(&txq->axq_lock); 1499 txq->axq_depth = 0; 1500 txq->axq_ampdu_depth = 0; 1501 txq->axq_tx_inprogress = false; 1502 sc->tx.txqsetup |= 1<<axq_qnum; 1503 1504 txq->txq_headidx = txq->txq_tailidx = 0; 1505 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1506 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1507 } 1508 return &sc->tx.txq[axq_qnum]; 1509 } 1510 1511 int ath_txq_update(struct ath_softc *sc, int qnum, 1512 struct ath9k_tx_queue_info *qinfo) 1513 { 1514 struct ath_hw *ah = sc->sc_ah; 1515 int error = 0; 1516 struct ath9k_tx_queue_info qi; 1517 1518 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); 1519 1520 ath9k_hw_get_txq_props(ah, qnum, &qi); 1521 qi.tqi_aifs = qinfo->tqi_aifs; 1522 qi.tqi_cwmin = qinfo->tqi_cwmin; 1523 qi.tqi_cwmax = qinfo->tqi_cwmax; 1524 qi.tqi_burstTime = qinfo->tqi_burstTime; 1525 qi.tqi_readyTime = qinfo->tqi_readyTime; 1526 1527 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 1528 ath_err(ath9k_hw_common(sc->sc_ah), 1529 "Unable to update hardware queue %u!\n", qnum); 1530 error = -EIO; 1531 } else { 1532 ath9k_hw_resettxqueue(ah, qnum); 1533 } 1534 1535 return error; 1536 } 1537 1538 int ath_cabq_update(struct ath_softc *sc) 1539 { 1540 struct ath9k_tx_queue_info qi; 1541 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 1542 int qnum = sc->beacon.cabq->axq_qnum; 1543 1544 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1545 /* 1546 * Ensure the readytime % is within the bounds. 1547 */ 1548 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) 1549 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; 1550 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1551 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1552 1553 qi.tqi_readyTime = (cur_conf->beacon_interval * 1554 sc->config.cabqReadytime) / 100; 1555 ath_txq_update(sc, qnum, &qi); 1556 1557 return 0; 1558 } 1559 1560 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, 1561 struct list_head *list) 1562 { 1563 struct ath_buf *bf, *lastbf; 1564 struct list_head bf_head; 1565 struct ath_tx_status ts; 1566 1567 memset(&ts, 0, sizeof(ts)); 1568 ts.ts_status = ATH9K_TX_FLUSH; 1569 INIT_LIST_HEAD(&bf_head); 1570 1571 while (!list_empty(list)) { 1572 bf = list_first_entry(list, struct ath_buf, list); 1573 1574 if (bf->bf_stale) { 1575 list_del(&bf->list); 1576 1577 ath_tx_return_buffer(sc, bf); 1578 continue; 1579 } 1580 1581 lastbf = bf->bf_lastbf; 1582 list_cut_position(&bf_head, list, &lastbf->list); 1583 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head); 1584 } 1585 } 1586 1587 /* 1588 * Drain a given TX queue (could be Beacon or Data) 1589 * 1590 * This assumes output has been stopped and 1591 * we do not need to block ath_tx_tasklet. 
1592 */ 1593 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq) 1594 { 1595 ath_txq_lock(sc, txq); 1596 1597 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1598 int idx = txq->txq_tailidx; 1599 1600 while (!list_empty(&txq->txq_fifo[idx])) { 1601 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]); 1602 1603 INCR(idx, ATH_TXFIFO_DEPTH); 1604 } 1605 txq->txq_tailidx = idx; 1606 } 1607 1608 txq->axq_link = NULL; 1609 txq->axq_tx_inprogress = false; 1610 ath_drain_txq_list(sc, txq, &txq->axq_q); 1611 1612 ath_txq_unlock_complete(sc, txq); 1613 } 1614 1615 bool ath_drain_all_txq(struct ath_softc *sc) 1616 { 1617 struct ath_hw *ah = sc->sc_ah; 1618 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1619 struct ath_txq *txq; 1620 int i; 1621 u32 npend = 0; 1622 1623 if (test_bit(SC_OP_INVALID, &sc->sc_flags)) 1624 return true; 1625 1626 ath9k_hw_abort_tx_dma(ah); 1627 1628 /* Check if any queue remains active */ 1629 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1630 if (!ATH_TXQ_SETUP(sc, i)) 1631 continue; 1632 1633 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) 1634 npend |= BIT(i); 1635 } 1636 1637 if (npend) 1638 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); 1639 1640 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1641 if (!ATH_TXQ_SETUP(sc, i)) 1642 continue; 1643 1644 /* 1645 * The caller will resume queues with ieee80211_wake_queues. 1646 * Mark the queue as not stopped to prevent ath_tx_complete 1647 * from waking the queue too early. 1648 */ 1649 txq = &sc->tx.txq[i]; 1650 txq->stopped = false; 1651 ath_draintxq(sc, txq); 1652 } 1653 1654 return !npend; 1655 } 1656 1657 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1658 { 1659 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1660 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1661 } 1662 1663 /* For each axq_acq entry, for each tid, try to schedule packets 1664 * for transmit until ampdu_depth has reached min Q depth. 
1665 */ 1666 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1667 { 1668 struct ath_atx_ac *ac, *ac_tmp, *last_ac; 1669 struct ath_atx_tid *tid, *last_tid; 1670 1671 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) || 1672 list_empty(&txq->axq_acq) || 1673 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1674 return; 1675 1676 rcu_read_lock(); 1677 1678 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1679 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1680 1681 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { 1682 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); 1683 list_del(&ac->list); 1684 ac->sched = false; 1685 1686 while (!list_empty(&ac->tid_q)) { 1687 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, 1688 list); 1689 list_del(&tid->list); 1690 tid->sched = false; 1691 1692 if (tid->paused) 1693 continue; 1694 1695 ath_tx_sched_aggr(sc, txq, tid); 1696 1697 /* 1698 * add tid to round-robin queue if more frames 1699 * are pending for the tid 1700 */ 1701 if (!skb_queue_empty(&tid->buf_q)) 1702 ath_tx_queue_tid(txq, tid); 1703 1704 if (tid == last_tid || 1705 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1706 break; 1707 } 1708 1709 if (!list_empty(&ac->tid_q) && !ac->sched) { 1710 ac->sched = true; 1711 list_add_tail(&ac->list, &txq->axq_acq); 1712 } 1713 1714 if (ac == last_ac || 1715 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1716 break; 1717 } 1718 1719 rcu_read_unlock(); 1720 } 1721 1722 /***********/ 1723 /* TX, DMA */ 1724 /***********/ 1725 1726 /* 1727 * Insert a chain of ath_buf (descriptors) on a txq and 1728 * assume the descriptors are already chained together by caller. 1729 */ 1730 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 1731 struct list_head *head, bool internal) 1732 { 1733 struct ath_hw *ah = sc->sc_ah; 1734 struct ath_common *common = ath9k_hw_common(ah); 1735 struct ath_buf *bf, *bf_last; 1736 bool puttxbuf = false; 1737 bool edma; 1738 1739 /* 1740 * Insert the frame on the outbound list and 1741 * pass it on to the hardware. 
1742 */ 1743 1744 if (list_empty(head)) 1745 return; 1746 1747 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1748 bf = list_first_entry(head, struct ath_buf, list); 1749 bf_last = list_entry(head->prev, struct ath_buf, list); 1750 1751 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n", 1752 txq->axq_qnum, txq->axq_depth); 1753 1754 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) { 1755 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); 1756 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1757 puttxbuf = true; 1758 } else { 1759 list_splice_tail_init(head, &txq->axq_q); 1760 1761 if (txq->axq_link) { 1762 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr); 1763 ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n", 1764 txq->axq_qnum, txq->axq_link, 1765 ito64(bf->bf_daddr), bf->bf_desc); 1766 } else if (!edma) 1767 puttxbuf = true; 1768 1769 txq->axq_link = bf_last->bf_desc; 1770 } 1771 1772 if (puttxbuf) { 1773 TX_STAT_INC(txq->axq_qnum, puttxbuf); 1774 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1775 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", 1776 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1777 } 1778 1779 if (!edma) { 1780 TX_STAT_INC(txq->axq_qnum, txstart); 1781 ath9k_hw_txstart(ah, txq->axq_qnum); 1782 } 1783 1784 if (!internal) { 1785 while (bf) { 1786 txq->axq_depth++; 1787 if (bf_is_ampdu_not_probing(bf)) 1788 txq->axq_ampdu_depth++; 1789 1790 bf = bf->bf_lastbf->bf_next; 1791 } 1792 } 1793 } 1794 1795 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, 1796 struct ath_atx_tid *tid, struct sk_buff *skb, 1797 struct ath_tx_control *txctl) 1798 { 1799 struct ath_frame_info *fi = get_frame_info(skb); 1800 struct list_head bf_head; 1801 struct ath_buf *bf; 1802 1803 /* 1804 * Do not queue to h/w when any of the following conditions is true: 1805 * - there are pending frames in software queue 1806 * - the TID is currently paused for ADDBA/BAR request 1807 * - seqno is not within block-ack window 1808 * - h/w queue depth exceeds low water mark 1809 */ 1810 if ((!skb_queue_empty(&tid->buf_q) || tid->paused || 1811 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || 1812 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && 1813 txq != sc->tx.uapsdq) { 1814 /* 1815 * Add this frame to software queue for scheduling later 1816 * for aggregation. 
1817 */ 1818 TX_STAT_INC(txq->axq_qnum, a_queued_sw); 1819 __skb_queue_tail(&tid->buf_q, skb); 1820 if (!txctl->an || !txctl->an->sleeping) 1821 ath_tx_queue_tid(txq, tid); 1822 return; 1823 } 1824 1825 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1826 if (!bf) { 1827 ieee80211_free_txskb(sc->hw, skb); 1828 return; 1829 } 1830 1831 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1832 bf->bf_state.bf_type = BUF_AMPDU; 1833 INIT_LIST_HEAD(&bf_head); 1834 list_add(&bf->list, &bf_head); 1835 1836 /* Add sub-frame to BAW */ 1837 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); 1838 1839 /* Queue to h/w without aggregation */ 1840 TX_STAT_INC(txq->axq_qnum, a_queued_hw); 1841 bf->bf_lastbf = bf; 1842 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1843 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1844 } 1845 1846 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1847 struct ath_atx_tid *tid, struct sk_buff *skb) 1848 { 1849 struct ath_frame_info *fi = get_frame_info(skb); 1850 struct list_head bf_head; 1851 struct ath_buf *bf; 1852 1853 bf = fi->bf; 1854 1855 INIT_LIST_HEAD(&bf_head); 1856 list_add_tail(&bf->list, &bf_head); 1857 bf->bf_state.bf_type = 0; 1858 1859 bf->bf_next = NULL; 1860 bf->bf_lastbf = bf; 1861 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1862 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1863 TX_STAT_INC(txq->axq_qnum, queued); 1864 } 1865 1866 static void setup_frame_info(struct ieee80211_hw *hw, 1867 struct ieee80211_sta *sta, 1868 struct sk_buff *skb, 1869 int framelen) 1870 { 1871 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1872 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1873 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1874 const struct ieee80211_rate *rate; 1875 struct ath_frame_info *fi = get_frame_info(skb); 1876 struct ath_node *an = NULL; 1877 enum ath9k_key_type keytype; 1878 bool short_preamble = false; 1879 1880 /* 1881 * We check if Short Preamble is needed for the CTS rate by 1882 * checking the BSS's global flag. 1883 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 1884 */ 1885 if (tx_info->control.vif && 1886 tx_info->control.vif->bss_conf.use_short_preamble) 1887 short_preamble = true; 1888 1889 rate = ieee80211_get_rts_cts_rate(hw, tx_info); 1890 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1891 1892 if (sta) 1893 an = (struct ath_node *) sta->drv_priv; 1894 1895 memset(fi, 0, sizeof(*fi)); 1896 if (hw_key) 1897 fi->keyix = hw_key->hw_key_idx; 1898 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0) 1899 fi->keyix = an->ps_key; 1900 else 1901 fi->keyix = ATH9K_TXKEYIX_INVALID; 1902 fi->keytype = keytype; 1903 fi->framelen = framelen; 1904 fi->rtscts_rate = rate->hw_value; 1905 if (short_preamble) 1906 fi->rtscts_rate |= rate->hw_value_short; 1907 } 1908 1909 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) 1910 { 1911 struct ath_hw *ah = sc->sc_ah; 1912 struct ath9k_channel *curchan = ah->curchan; 1913 1914 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && 1915 (curchan->channelFlags & CHANNEL_5GHZ) && 1916 (chainmask == 0x7) && (rate < 0x90)) 1917 return 0x3; 1918 else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) && 1919 IS_CCK_RATE(rate)) 1920 return 0x2; 1921 else 1922 return chainmask; 1923 } 1924 1925 /* 1926 * Assign a descriptor (and sequence number if necessary, 1927 * and map buffer for DMA. 
Frees skb on error 1928 */ 1929 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1930 struct ath_txq *txq, 1931 struct ath_atx_tid *tid, 1932 struct sk_buff *skb) 1933 { 1934 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1935 struct ath_frame_info *fi = get_frame_info(skb); 1936 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1937 struct ath_buf *bf; 1938 int fragno; 1939 u16 seqno; 1940 1941 bf = ath_tx_get_buffer(sc); 1942 if (!bf) { 1943 ath_dbg(common, XMIT, "TX buffers are full\n"); 1944 return NULL; 1945 } 1946 1947 ATH_TXBUF_RESET(bf); 1948 1949 if (tid) { 1950 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1951 seqno = tid->seq_next; 1952 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1953 1954 if (fragno) 1955 hdr->seq_ctrl |= cpu_to_le16(fragno); 1956 1957 if (!ieee80211_has_morefrags(hdr->frame_control)) 1958 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1959 1960 bf->bf_state.seqno = seqno; 1961 } 1962 1963 bf->bf_mpdu = skb; 1964 1965 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1966 skb->len, DMA_TO_DEVICE); 1967 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 1968 bf->bf_mpdu = NULL; 1969 bf->bf_buf_addr = 0; 1970 ath_err(ath9k_hw_common(sc->sc_ah), 1971 "dma_mapping_error() on TX\n"); 1972 ath_tx_return_buffer(sc, bf); 1973 return NULL; 1974 } 1975 1976 fi->bf = bf; 1977 1978 return bf; 1979 } 1980 1981 static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, 1982 struct ath_tx_control *txctl) 1983 { 1984 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1986 struct ieee80211_sta *sta = txctl->sta; 1987 struct ieee80211_vif *vif = info->control.vif; 1988 struct ath_softc *sc = hw->priv; 1989 int frmlen = skb->len + FCS_LEN; 1990 int padpos, padsize; 1991 1992 /* NOTE: sta can be NULL according to net/mac80211.h */ 1993 if (sta) 1994 txctl->an = (struct ath_node *)sta->drv_priv; 1995 1996 if (info->control.hw_key) 1997 frmlen += info->control.hw_key->icv_len; 1998 1999 /* 2000 * As a temporary workaround, assign seq# here; this will likely need 2001 * to be cleaned up to work better with Beacon transmission and virtual 2002 * BSSes. 

static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	int frmlen = skb->len + FCS_LEN;
	int padpos, padsize;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and
	 * virtual BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	/* Add the padding after the header if this is not already done */
	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, sta, skb, frmlen);
	return 0;
}
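
/*
 * Worked example for the header padding above (illustrative only): a QoS
 * data frame has a 26-byte 802.11 header, so padpos = 26 and
 * padsize = 26 & 3 = 2. skb_push() grows the buffer by two bytes at the
 * front and memmove() shifts the header into that space, leaving a
 * two-byte gap between header and payload so the payload starts on a
 * 4-byte boundary. ath_tx_complete() below undoes exactly this before the
 * frame is handed back to mac80211.
 */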

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txctl->sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;
	int q;
	int ret;

	ret = ath_tx_prepare(hw, skb, txctl);
	if (ret)
		return ret;

	hdr = (struct ieee80211_hdr *) skb->data;
	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);

	ath_txq_lock(sc, txq);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
	    !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = true;
	}

	if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
		ath_txq_unlock(sc, txq);
		txq = sc->tx.uapsdq;
		ath_txq_lock(sc, txq);
	}

	if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
		goto out;
	}

	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
	if (!bf) {
		if (txctl->paprd)
			dev_kfree_skb_any(skb);
		else
			ieee80211_free_txskb(sc->hw, skb);
		goto out;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;

	ath_set_rates(vif, sta, bf);
	ath_tx_send_normal(sc, txq, tid, skb);

out:
	ath_txq_unlock(sc, txq);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_tx_control txctl = {
		.txq = sc->beacon.cabq
	};
	struct ath_tx_info info = {};
	struct ieee80211_hdr *hdr;
	struct ath_buf *bf_tail = NULL;
	struct ath_buf *bf;
	LIST_HEAD(bf_q);
	int duration = 0;
	int max_duration;

	max_duration =
		sc->cur_beacon_conf.beacon_interval * 1000 *
		sc->cur_beacon_conf.dtim_period / ATH_BCBUF;

	do {
		struct ath_frame_info *fi = get_frame_info(skb);

		if (ath_tx_prepare(hw, skb, &txctl))
			break;

		bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
		if (!bf)
			break;

		bf->bf_lastbf = bf;
		ath_set_rates(vif, NULL, bf);
		ath_buf_set_rate(sc, bf, &info, fi->framelen);
		duration += info.rates[0].PktDuration;
		if (bf_tail)
			bf_tail->bf_next = bf;

		list_add_tail(&bf->list, &bf_q);
		bf_tail = bf;
		skb = NULL;

		if (duration > max_duration)
			break;

		skb = ieee80211_get_buffered_bc(hw, vif);
	} while (skb);

	if (skb)
		ieee80211_free_txskb(hw, skb);

	if (list_empty(&bf_q))
		return;

	bf = list_first_entry(&bf_q, struct ath_buf, list);
	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;

	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   sizeof(*hdr), DMA_TO_DEVICE);
	}

	ath_txq_lock(sc, txctl.txq);
	ath_tx_fill_desc(sc, bf, txctl.txq, 0);
	ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
	TX_STAT_INC(txctl.txq->axq_qnum, queued);
	ath_txq_unlock(sc, txctl.txq);
}
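
/*
 * Worked example for the CABQ duration budget above (illustrative only,
 * assuming ATH_BCBUF is 8): with a 100 TU beacon interval and a DTIM
 * period of 2, max_duration = 100 * 1000 * 2 / 8 = 25000, i.e. roughly
 * 25 ms of estimated airtime. Buffered multicast frames are chained until
 * the accumulated PktDuration of the first rate series would exceed that
 * budget; a fetched frame that cannot be prepared or mapped is freed via
 * ieee80211_free_txskb().
 */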

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;
	unsigned long flags;

	ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);

	if (sc->sc_ah->caldata)
		sc->sc_ah->caldata->paprd_packet_sent = true;

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ieee80211_hdrlen(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	__skb_queue_tail(&txq->complete_q, skb);

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.uapsdq)
		txq = sc->tx.txq_map[q];

	if (txq == sc->tx.txq_map[q]) {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped &&
		    txq->pending_frames < sc->tx.txq_max_pending[q]) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = false;
		}
	}
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/*
	 * At this point, skb (bf->bf_mpdu) has been consumed; make sure we
	 * don't accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to the free queue.
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
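
/*
 * Note on the queue flow control involved above (descriptive only): each
 * frame accepted in ath_tx_start() increments txq->pending_frames for its
 * mac80211 queue, and the queue is stopped once the count exceeds
 * sc->tx.txq_max_pending[q]. The completion path in ath_tx_complete()
 * decrements the count and wakes the queue again as soon as it drops back
 * below the limit, which roughly bounds the number of frames the driver
 * buffers per hardware queue.
 */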

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
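
/*
 * Worked example for the rate series fix-up above (illustrative only,
 * assuming the usual four rate series): if the hardware reports
 * ts_rateindex == 1 and ts_longretry == 3, series 1 ends up with
 * count = 4 (three retries plus the final attempt), while series 2 and 3
 * are invalidated (count = 0, idx = -1) so mac80211's rate control only
 * sees the series that were actually used.
 */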

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	ath_txq_lock(sc, txq);
	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove the ath_bufs of the same transmit unit from txq,
		 * but leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	ath_txq_unlock_complete(sc, txq);
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
	int i;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct list_head *fifo_list;
	int status;

	for (;;) {
		if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		fifo_list = &txq->txq_fifo[txq->txq_tailidx];
		if (list_empty(fifo_list)) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(fifo_list, struct ath_buf, list);
		if (bf->bf_stale) {
			list_del(&bf->list);
			ath_tx_return_buffer(sc, bf);
			bf = list_first_entry(fifo_list, struct ath_buf, list);
		}

		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		if (list_is_last(&lastbf->list, fifo_list)) {
			list_splice_tail_init(fifo_list, &bf_head);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		} else {
			lastbf->bf_stale = true;
			if (bf != lastbf)
				list_cut_position(&bf_head, fifo_list,
						  lastbf->list.prev);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}
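
/*
 * Sizing note for the status ring set up above (descriptive only): the
 * DMA-coherent area holds ATH_TXSTATUS_RING_SIZE entries of caps.txs_len
 * bytes each. On EDMA chips the hardware writes TX status entries into
 * this ring, and ath_tx_edma_tasklet() drains them through
 * ath9k_hw_txprocdesc() until it returns -EINPROGRESS.
 */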

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		return error;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		error = ath_tx_edma_init(sc);

	return error;
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->active = false;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->active = false;

		ath_txq_unlock(sc, txq);
	}
}
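
/*
 * Note on the per-node TID state initialized in ath_tx_node_init() above
 * (descriptive only): each of the IEEE80211_NUM_TIDS TIDs starts with an
 * empty software queue and a block-ack window of WME_MAX_BA, and is bound
 * to one of the IEEE80211_NUM_ACS access categories via TID_TO_WME_AC();
 * the access category in turn selects the hardware queue through
 * sc->tx.txq_map[]. For example, under the standard 802.11 UP-to-AC
 * mapping, TIDs 6 and 7 both end up on the voice (VO) queue.
 */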