/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

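	/*
	 * Illustrative note (not from the original): each TX
	 * descriptor holds up to sc_tx_nmaps buffer pointers, so a
	 * buffer with bf_nseg segments spans
	 * ((bf_nseg - 1) / sc_tx_nmaps) + 1 descriptors. For example,
	 * assuming sc_tx_nmaps == 4 (EDMA), a 5-segment mbuf chain
	 * posts n = ((5 - 1) / 4) + 1 = 2 descriptor log entries in
	 * the loop below.
	 */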
	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and send
 * non-QoS TID frames to the queue given by
 * ATH_NONQOS_TID_AC (currently WME_AC_VO).
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}
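/*
 * Illustrative example (not in the original source): for a QoS data
 * frame that net80211 classified as AC_VI, ath_tx_getac() returns
 * WME_AC_VI and ath_tx_gettid() returns WME_AC_TO_TID(WME_AC_VI),
 * i.e. TID 5 under the driver's mapping. For a non-QoS frame (e.g.
 * a management frame), the pair is
 * (ATH_NONQOS_TID_AC, IEEE80211_NONQOS_TID) == (AC_VO, TID 16),
 * regardless of the AC the sender requested.
 */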
void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame. Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources. For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors. We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
	struct ath_buf *bf, int is_aggr, int is_first_subframe,
	int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
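	/*
	 * Illustrative sketch (not from the original): with
	 * numTxMaps == 4 (EDMA) and bf_nseg == 6, the loop below
	 * emits two descriptors - the first carrying segments 0-3,
	 * the second carrying segments 4-5 - and only the final
	 * descriptor of the frame gets a NULL link pointer. With
	 * numTxMaps == 1 (legacy), each segment gets its own
	 * descriptor.
	 */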
398 */ 399 ds = (char *) bf->bf_desc; 400 bp = dsp = 0; 401 bzero(bufAddrList, sizeof(bufAddrList)); 402 bzero(segLenList, sizeof(segLenList)); 403 for (i = 0; i < bf->bf_nseg; i++) { 404 bufAddrList[bp] = bf->bf_segs[i].ds_addr; 405 segLenList[bp] = bf->bf_segs[i].ds_len; 406 bp++; 407 408 /* 409 * Go to the next segment if this isn't the last segment 410 * and there's space in the current TX map. 411 */ 412 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 413 continue; 414 415 /* 416 * Last segment or we're out of buffer pointers. 417 */ 418 bp = 0; 419 420 if (i == bf->bf_nseg - 1) 421 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 422 else 423 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 424 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 425 426 /* 427 * XXX This assumes that bfs_txq is the actual destination 428 * hardware queue at this point. It may not have been 429 * assigned, it may actually be pointing to the multicast 430 * software TXQ id. These must be fixed! 431 */ 432 ath_hal_filltxdesc(ah, (struct ath_desc *) ds 433 , bufAddrList 434 , segLenList 435 , bf->bf_descid /* XXX desc id */ 436 , bf->bf_state.bfs_tx_queue 437 , isFirstDesc /* first segment */ 438 , i == bf->bf_nseg - 1 /* last segment */ 439 , (struct ath_desc *) ds0 /* first descriptor */ 440 ); 441 442 /* 443 * Make sure the 11n aggregate fields are cleared. 444 * 445 * XXX TODO: this doesn't need to be called for 446 * aggregate frames; as it'll be called on all 447 * sub-frames. Since the descriptors are in 448 * non-cacheable memory, this leads to some 449 * rather slow writes on MIPS/ARM platforms. 450 */ 451 if (ath_tx_is_11n(sc)) 452 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 453 454 /* 455 * If 11n is enabled, set it up as if it's an aggregate 456 * frame. 457 */ 458 if (is_last_subframe) { 459 ath_hal_set11n_aggr_last(sc->sc_ah, 460 (struct ath_desc *) ds); 461 } else if (is_aggr) { 462 /* 463 * This clears the aggrlen field; so 464 * the caller needs to call set_aggr_first()! 465 * 466 * XXX TODO: don't call this for the first 467 * descriptor in the first frame in an 468 * aggregate! 469 */ 470 ath_hal_set11n_aggr_middle(sc->sc_ah, 471 (struct ath_desc *) ds, 472 bf->bf_state.bfs_ndelim); 473 } 474 isFirstDesc = 0; 475 bf->bf_lastds = (struct ath_desc *) ds; 476 477 /* 478 * Don't forget to skip to the next descriptor. 479 */ 480 ds += sc->sc_tx_desclen; 481 dsp++; 482 483 /* 484 * .. and don't forget to blank these out! 485 */ 486 bzero(bufAddrList, sizeof(bufAddrList)); 487 bzero(segLenList, sizeof(segLenList)); 488 } 489 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 490 } 491 492 /* 493 * Set the rate control fields in the given descriptor based on 494 * the bf_state fields and node state. 495 * 496 * The bfs fields should already be set with the relevant rate 497 * control information, including whether MRR is to be enabled. 498 * 499 * Since the FreeBSD HAL currently sets up the first TX rate 500 * in ath_hal_setuptxdesc(), this will setup the MRR 501 * conditionally for the pre-11n chips, and call ath_buf_set_rate 502 * unconditionally for 11n chips. These require the 11n rate 503 * scenario to be set if MCS rates are enabled, so it's easier 504 * to just always call it. The caller can then only set rates 2, 3 505 * and 4 if multi-rate retry is needed. 
506 */ 507 static void 508 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 509 struct ath_buf *bf) 510 { 511 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 512 513 /* If mrr is disabled, blank tries 1, 2, 3 */ 514 if (! bf->bf_state.bfs_ismrr) 515 rc[1].tries = rc[2].tries = rc[3].tries = 0; 516 517 #if 0 518 /* 519 * If NOACK is set, just set ntries=1. 520 */ 521 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 522 rc[1].tries = rc[2].tries = rc[3].tries = 0; 523 rc[0].tries = 1; 524 } 525 #endif 526 527 /* 528 * Always call - that way a retried descriptor will 529 * have the MRR fields overwritten. 530 * 531 * XXX TODO: see if this is really needed - setting up 532 * the first descriptor should set the MRR fields to 0 533 * for us anyway. 534 */ 535 if (ath_tx_is_11n(sc)) { 536 ath_buf_set_rate(sc, ni, bf); 537 } else { 538 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 539 , rc[1].ratecode, rc[1].tries 540 , rc[2].ratecode, rc[2].tries 541 , rc[3].ratecode, rc[3].tries 542 ); 543 } 544 } 545 546 /* 547 * Setup segments+descriptors for an 11n aggregate. 548 * bf_first is the first buffer in the aggregate. 549 * The descriptor list must already been linked together using 550 * bf->bf_next. 551 */ 552 static void 553 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 554 { 555 struct ath_buf *bf, *bf_prev = NULL; 556 struct ath_desc *ds0 = bf_first->bf_desc; 557 558 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 559 __func__, bf_first->bf_state.bfs_nframes, 560 bf_first->bf_state.bfs_al); 561 562 bf = bf_first; 563 564 if (bf->bf_state.bfs_txrate0 == 0) 565 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 566 __func__, bf, 0); 567 if (bf->bf_state.bfs_rc[0].ratecode == 0) 568 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 569 __func__, bf, 0); 570 571 /* 572 * Setup all descriptors of all subframes - this will 573 * call ath_hal_set11naggrmiddle() on every frame. 574 */ 575 while (bf != NULL) { 576 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 577 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 578 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 579 SEQNO(bf->bf_state.bfs_seqno)); 580 581 /* 582 * Setup the initial fields for the first descriptor - all 583 * the non-11n specific stuff. 584 */ 585 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 586 , bf->bf_state.bfs_pktlen /* packet length */ 587 , bf->bf_state.bfs_hdrlen /* header length */ 588 , bf->bf_state.bfs_atype /* Atheros packet type */ 589 , bf->bf_state.bfs_txpower /* txpower */ 590 , bf->bf_state.bfs_txrate0 591 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 592 , bf->bf_state.bfs_keyix /* key cache index */ 593 , bf->bf_state.bfs_txantenna /* antenna mode */ 594 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 595 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 596 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 597 ); 598 599 /* 600 * First descriptor? Setup the rate control and initial 601 * aggregate header information. 602 */ 603 if (bf == bf_first) { 604 /* 605 * setup first desc with rate and aggr info 606 */ 607 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 608 } 609 610 /* 611 * Setup the descriptors for a multi-descriptor frame. 612 * This is both aggregate and non-aggregate aware. 613 */ 614 ath_tx_chaindesclist(sc, ds0, bf, 615 1, /* is_aggr */ 616 !! (bf == bf_first), /* is_first_subframe */ 617 !! 
/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
		    , bf->bf_state.bfs_pktlen	/* packet length */
		    , bf->bf_state.bfs_hdrlen	/* header length */
		    , bf->bf_state.bfs_atype	/* Atheros packet type */
		    , bf->bf_state.bfs_txpower	/* txpower */
		    , bf->bf_state.bfs_txrate0
		    , bf->bf_state.bfs_try0	/* series 0 rate/tries */
		    , bf->bf_state.bfs_keyix	/* key cache index */
		    , bf->bf_state.bfs_txantenna	/* antenna mode */
		    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
		    , bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true. For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info. But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor? Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion. That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}
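/*
 * Illustrative note (not in the original): frames buffered here are
 * burst out on the CAB ("Content After Beacon") queue right after a
 * DTIM beacon. Chaining the MORE_DATA bit above means a dozing
 * station keeps its receiver on until it sees a CAB frame with
 * MORE_DATA clear - e.g. beacon (DTIM) -> mcast1 (MORE_DATA) ->
 * mcast2 (MORE_DATA) -> mcast3 (last; MORE_DATA clear).
 */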
711 */ 712 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 713 DPRINTF(sc, ATH_DEBUG_XMIT, 714 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 715 __func__, bf, bf->bf_state.bfs_tx_queue, 716 txq->axq_qnum); 717 } 718 719 ATH_TXQ_LOCK(txq); 720 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 721 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 722 struct ieee80211_frame *wh; 723 724 /* mark previous frame */ 725 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 726 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 727 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 728 BUS_DMASYNC_PREWRITE); 729 730 /* link descriptor */ 731 ath_hal_settxdesclink(sc->sc_ah, 732 bf_last->bf_lastds, 733 bf->bf_daddr); 734 } 735 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 736 ATH_TXQ_UNLOCK(txq); 737 } 738 739 /* 740 * Hand-off packet to a hardware queue. 741 */ 742 static void 743 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 744 struct ath_buf *bf) 745 { 746 struct ath_hal *ah = sc->sc_ah; 747 struct ath_buf *bf_first; 748 749 /* 750 * Insert the frame on the outbound list and pass it on 751 * to the hardware. Multicast frames buffered for power 752 * save stations and transmit from the CAB queue are stored 753 * on a s/w only queue and loaded on to the CAB queue in 754 * the SWBA handler since frames only go out on DTIM and 755 * to avoid possible races. 756 */ 757 ATH_TX_LOCK_ASSERT(sc); 758 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 759 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 760 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 761 ("ath_tx_handoff_hw called for mcast queue")); 762 763 /* 764 * XXX racy, should hold the PCU lock when checking this, 765 * and also should ensure that the TX counter is >0! 766 */ 767 KASSERT((sc->sc_inreset_cnt == 0), 768 ("%s: TX during reset?\n", __func__)); 769 770 #if 0 771 /* 772 * This causes a LOR. Find out where the PCU lock is being 773 * held whilst the TXQ lock is grabbed - that shouldn't 774 * be occuring. 775 */ 776 ATH_PCU_LOCK(sc); 777 if (sc->sc_inreset_cnt) { 778 ATH_PCU_UNLOCK(sc); 779 DPRINTF(sc, ATH_DEBUG_RESET, 780 "%s: called with sc_in_reset != 0\n", 781 __func__); 782 DPRINTF(sc, ATH_DEBUG_XMIT, 783 "%s: queued: TXDP[%u] = %p (%p) depth %d\n", 784 __func__, txq->axq_qnum, 785 (caddr_t)bf->bf_daddr, bf->bf_desc, 786 txq->axq_depth); 787 /* XXX axq_link needs to be set and updated! */ 788 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 789 if (bf->bf_state.bfs_aggr) 790 txq->axq_aggr_depth++; 791 return; 792 } 793 ATH_PCU_UNLOCK(sc); 794 #endif 795 796 ATH_TXQ_LOCK(txq); 797 798 /* 799 * XXX TODO: if there's a holdingbf, then 800 * ATH_TXQ_PUTRUNNING should be clear. 801 * 802 * If there is a holdingbf and the list is empty, 803 * then axq_link should be pointing to the holdingbf. 804 * 805 * Otherwise it should point to the last descriptor 806 * in the last ath_buf. 807 * 808 * In any case, we should really ensure that we 809 * update the previous descriptor link pointer to 810 * this descriptor, regardless of all of the above state. 811 * 812 * For now this is captured by having axq_link point 813 * to either the holdingbf (if the TXQ list is empty) 814 * or the end of the list (if the TXQ list isn't empty.) 815 * I'd rather just kill axq_link here and do it as above. 816 */ 817 818 /* 819 * Append the frame to the TX queue. 
820 */ 821 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 822 ATH_KTR(sc, ATH_KTR_TX, 3, 823 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 824 "depth=%d", 825 txq->axq_qnum, 826 bf, 827 txq->axq_depth); 828 829 /* 830 * If there's a link pointer, update it. 831 * 832 * XXX we should replace this with the above logic, just 833 * to kill axq_link with fire. 834 */ 835 if (txq->axq_link != NULL) { 836 *txq->axq_link = bf->bf_daddr; 837 DPRINTF(sc, ATH_DEBUG_XMIT, 838 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 839 txq->axq_qnum, txq->axq_link, 840 (caddr_t)bf->bf_daddr, bf->bf_desc, 841 txq->axq_depth); 842 ATH_KTR(sc, ATH_KTR_TX, 5, 843 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 844 "lastds=%d", 845 txq->axq_qnum, txq->axq_link, 846 (caddr_t)bf->bf_daddr, bf->bf_desc, 847 bf->bf_lastds); 848 } 849 850 /* 851 * If we've not pushed anything into the hardware yet, 852 * push the head of the queue into the TxDP. 853 * 854 * Once we've started DMA, there's no guarantee that 855 * updating the TxDP with a new value will actually work. 856 * So we just don't do that - if we hit the end of the list, 857 * we keep that buffer around (the "holding buffer") and 858 * re-start DMA by updating the link pointer of _that_ 859 * descriptor and then restart DMA. 860 */ 861 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 862 bf_first = TAILQ_FIRST(&txq->axq_q); 863 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 864 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 865 DPRINTF(sc, ATH_DEBUG_XMIT, 866 "%s: TXDP[%u] = %p (%p) depth %d\n", 867 __func__, txq->axq_qnum, 868 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 869 txq->axq_depth); 870 ATH_KTR(sc, ATH_KTR_TX, 5, 871 "ath_tx_handoff: TXDP[%u] = %p (%p) " 872 "lastds=%p depth %d", 873 txq->axq_qnum, 874 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 875 bf_first->bf_lastds, 876 txq->axq_depth); 877 } 878 879 /* 880 * Ensure that the bf TXQ matches this TXQ, so later 881 * checking and holding buffer manipulation is sane. 882 */ 883 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 884 DPRINTF(sc, ATH_DEBUG_XMIT, 885 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 886 __func__, bf, bf->bf_state.bfs_tx_queue, 887 txq->axq_qnum); 888 } 889 890 /* 891 * Track aggregate queue depth. 892 */ 893 if (bf->bf_state.bfs_aggr) 894 txq->axq_aggr_depth++; 895 896 /* 897 * Update the link pointer. 898 */ 899 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 900 901 /* 902 * Start DMA. 903 * 904 * If we wrote a TxDP above, DMA will start from here. 905 * 906 * If DMA is running, it'll do nothing. 907 * 908 * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 909 * or VEOL) then it stops at the last transmitted write. 910 * We then append a new frame by updating the link pointer 911 * in that descriptor and then kick TxE here; it will re-read 912 * that last descriptor and find the new descriptor to transmit. 913 * 914 * This is why we keep the holding descriptor around. 915 */ 916 ath_hal_txstart(ah, txq->axq_qnum); 917 ATH_TXQ_UNLOCK(txq); 918 ATH_KTR(sc, ATH_KTR_TX, 1, 919 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 920 } 921 922 /* 923 * Restart TX DMA for the given TXQ. 924 * 925 * This must be called whether the queue is empty or not. 
926 */ 927 static void 928 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 929 { 930 struct ath_buf *bf, *bf_last; 931 932 ATH_TXQ_LOCK_ASSERT(txq); 933 934 /* XXX make this ATH_TXQ_FIRST */ 935 bf = TAILQ_FIRST(&txq->axq_q); 936 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 937 938 if (bf == NULL) 939 return; 940 941 DPRINTF(sc, ATH_DEBUG_RESET, 942 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 943 __func__, 944 txq->axq_qnum, 945 bf, 946 bf_last, 947 (uint32_t) bf->bf_daddr); 948 949 #ifdef ATH_DEBUG 950 if (sc->sc_debug & ATH_DEBUG_RESET) 951 ath_tx_dump(sc, txq); 952 #endif 953 954 /* 955 * This is called from a restart, so DMA is known to be 956 * completely stopped. 957 */ 958 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 959 ("%s: Q%d: called with PUTRUNNING=1\n", 960 __func__, 961 txq->axq_qnum)); 962 963 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 964 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 965 966 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 967 &txq->axq_link); 968 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 969 } 970 971 /* 972 * Hand off a packet to the hardware (or mcast queue.) 973 * 974 * The relevant hardware txq should be locked. 975 */ 976 static void 977 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 978 struct ath_buf *bf) 979 { 980 ATH_TX_LOCK_ASSERT(sc); 981 982 #ifdef ATH_DEBUG_ALQ 983 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 984 ath_tx_alq_post(sc, bf); 985 #endif 986 987 if (txq->axq_qnum == ATH_TXQ_SWQ) 988 ath_tx_handoff_mcast(sc, txq, bf); 989 else 990 ath_tx_handoff_hw(sc, txq, bf); 991 } 992 993 static int 994 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 995 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 996 int *keyix) 997 { 998 DPRINTF(sc, ATH_DEBUG_XMIT, 999 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 1000 __func__, 1001 *hdrlen, 1002 *pktlen, 1003 isfrag, 1004 iswep, 1005 m0); 1006 1007 if (iswep) { 1008 const struct ieee80211_cipher *cip; 1009 struct ieee80211_key *k; 1010 1011 /* 1012 * Construct the 802.11 header+trailer for an encrypted 1013 * frame. The only reason this can fail is because of an 1014 * unknown or unsupported cipher/key type. 1015 */ 1016 k = ieee80211_crypto_encap(ni, m0); 1017 if (k == NULL) { 1018 /* 1019 * This can happen when the key is yanked after the 1020 * frame was queued. Just discard the frame; the 1021 * 802.11 layer counts failures and provides 1022 * debugging/diagnostics. 1023 */ 1024 return (0); 1025 } 1026 /* 1027 * Adjust the packet + header lengths for the crypto 1028 * additions and calculate the h/w key index. When 1029 * a s/w mic is done the frame will have had any mic 1030 * added to it prior to entry so m0->m_pkthdr.len will 1031 * account for it. Otherwise we need to add it to the 1032 * packet length. 1033 */ 1034 cip = k->wk_cipher; 1035 (*hdrlen) += cip->ic_header; 1036 (*pktlen) += cip->ic_header + cip->ic_trailer; 1037 /* NB: frags always have any TKIP MIC done in s/w */ 1038 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1039 (*pktlen) += cip->ic_miclen; 1040 (*keyix) = k->wk_keyix; 1041 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1042 /* 1043 * Use station key cache slot, if assigned. 
1044 */ 1045 (*keyix) = ni->ni_ucastkey.wk_keyix; 1046 if ((*keyix) == IEEE80211_KEYIX_NONE) 1047 (*keyix) = HAL_TXKEYIX_INVALID; 1048 } else 1049 (*keyix) = HAL_TXKEYIX_INVALID; 1050 1051 return (1); 1052 } 1053 1054 /* 1055 * Calculate whether interoperability protection is required for 1056 * this frame. 1057 * 1058 * This requires the rate control information be filled in, 1059 * as the protection requirement depends upon the current 1060 * operating mode / PHY. 1061 */ 1062 static void 1063 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1064 { 1065 struct ieee80211_frame *wh; 1066 uint8_t rix; 1067 uint16_t flags; 1068 int shortPreamble; 1069 const HAL_RATE_TABLE *rt = sc->sc_currates; 1070 struct ifnet *ifp = sc->sc_ifp; 1071 struct ieee80211com *ic = ifp->if_l2com; 1072 1073 flags = bf->bf_state.bfs_txflags; 1074 rix = bf->bf_state.bfs_rc[0].rix; 1075 shortPreamble = bf->bf_state.bfs_shpream; 1076 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1077 1078 /* 1079 * If 802.11g protection is enabled, determine whether 1080 * to use RTS/CTS or just CTS. Note that this is only 1081 * done for OFDM unicast frames. 1082 */ 1083 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1084 rt->info[rix].phy == IEEE80211_T_OFDM && 1085 (flags & HAL_TXDESC_NOACK) == 0) { 1086 bf->bf_state.bfs_doprot = 1; 1087 /* XXX fragments must use CCK rates w/ protection */ 1088 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1089 flags |= HAL_TXDESC_RTSENA; 1090 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1091 flags |= HAL_TXDESC_CTSENA; 1092 } 1093 /* 1094 * For frags it would be desirable to use the 1095 * highest CCK rate for RTS/CTS. But stations 1096 * farther away may detect it at a lower CCK rate 1097 * so use the configured protection rate instead 1098 * (for now). 1099 */ 1100 sc->sc_stats.ast_tx_protect++; 1101 } 1102 1103 /* 1104 * If 11n protection is enabled and it's a HT frame, 1105 * enable RTS. 1106 * 1107 * XXX ic_htprotmode or ic_curhtprotmode? 1108 * XXX should it_htprotmode only matter if ic_curhtprotmode 1109 * XXX indicates it's not a HT pure environment? 1110 */ 1111 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1112 rt->info[rix].phy == IEEE80211_T_HT && 1113 (flags & HAL_TXDESC_NOACK) == 0) { 1114 flags |= HAL_TXDESC_RTSENA; 1115 sc->sc_stats.ast_tx_htprotect++; 1116 } 1117 bf->bf_state.bfs_txflags = flags; 1118 } 1119 1120 /* 1121 * Update the frame duration given the currently selected rate. 1122 * 1123 * This also updates the frame duration value, so it will require 1124 * a DMA flush. 1125 */ 1126 static void 1127 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1128 { 1129 struct ieee80211_frame *wh; 1130 uint8_t rix; 1131 uint16_t flags; 1132 int shortPreamble; 1133 struct ath_hal *ah = sc->sc_ah; 1134 const HAL_RATE_TABLE *rt = sc->sc_currates; 1135 int isfrag = bf->bf_m->m_flags & M_FRAG; 1136 1137 flags = bf->bf_state.bfs_txflags; 1138 rix = bf->bf_state.bfs_rc[0].rix; 1139 shortPreamble = bf->bf_state.bfs_shpream; 1140 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1141 1142 /* 1143 * Calculate duration. This logically belongs in the 802.11 1144 * layer but it lacks sufficient information to calculate it. 
/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly. The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame. We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}
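/*
 * Illustrative composition (not in the original): for RTS/CTS with a
 * short preamble and ACKs enabled, the value built above is
 *
 *	ctsduration = spAckDuration[cix]	(SIFS + CTS)
 *	            + txtime(pktlen @ rix)	(the data frame)
 *	            + spAckDuration[rix]	(SIFS + ACK)
 *
 * whereas CTS-only protection skips the first term and NOACK frames
 * skip the last.
 */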
1255 * 1256 * To support rate lookups for each software retry, the rts/cts rate 1257 * and cts duration must be re-calculated. 1258 * 1259 * This function assumes the RTS/CTS flags have been set as needed; 1260 * mrr has been disabled; and the rate control lookup has been done. 1261 * 1262 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1263 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1264 */ 1265 static void 1266 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1267 { 1268 uint16_t ctsduration = 0; 1269 uint8_t ctsrate = 0; 1270 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1271 uint8_t cix = 0; 1272 const HAL_RATE_TABLE *rt = sc->sc_currates; 1273 1274 /* 1275 * No RTS/CTS enabled? Don't bother. 1276 */ 1277 if ((bf->bf_state.bfs_txflags & 1278 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1279 /* XXX is this really needed? */ 1280 bf->bf_state.bfs_ctsrate = 0; 1281 bf->bf_state.bfs_ctsduration = 0; 1282 return; 1283 } 1284 1285 /* 1286 * If protection is enabled, use the protection rix control 1287 * rate. Otherwise use the rate0 control rate. 1288 */ 1289 if (bf->bf_state.bfs_doprot) 1290 rix = sc->sc_protrix; 1291 else 1292 rix = bf->bf_state.bfs_rc[0].rix; 1293 1294 /* 1295 * If the raw path has hard-coded ctsrate0 to something, 1296 * use it. 1297 */ 1298 if (bf->bf_state.bfs_ctsrate0 != 0) 1299 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1300 else 1301 /* Control rate from above */ 1302 cix = rt->info[rix].controlRate; 1303 1304 /* Calculate the rtscts rate for the given cix */ 1305 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1306 bf->bf_state.bfs_shpream); 1307 1308 /* The 11n chipsets do ctsduration calculations for you */ 1309 if (! ath_tx_is_11n(sc)) 1310 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1311 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1312 rt, bf->bf_state.bfs_txflags); 1313 1314 /* Squirrel away in ath_buf */ 1315 bf->bf_state.bfs_ctsrate = ctsrate; 1316 bf->bf_state.bfs_ctsduration = ctsduration; 1317 1318 /* 1319 * Must disable multi-rate retry when using RTS/CTS. 1320 */ 1321 if (!sc->sc_mrrprot) { 1322 bf->bf_state.bfs_ismrr = 0; 1323 bf->bf_state.bfs_try0 = 1324 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1325 } 1326 } 1327 1328 /* 1329 * Setup the descriptor chain for a normal or fast-frame 1330 * frame. 1331 * 1332 * XXX TODO: extend to include the destination hardware QCU ID. 1333 * Make sure that is correct. Make sure that when being added 1334 * to the mcastq, the CABQ QCUID is set or things will get a bit 1335 * odd. 
1336 */ 1337 static void 1338 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1339 { 1340 struct ath_desc *ds = bf->bf_desc; 1341 struct ath_hal *ah = sc->sc_ah; 1342 1343 if (bf->bf_state.bfs_txrate0 == 0) 1344 DPRINTF(sc, ATH_DEBUG_XMIT, 1345 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1346 1347 ath_hal_setuptxdesc(ah, ds 1348 , bf->bf_state.bfs_pktlen /* packet length */ 1349 , bf->bf_state.bfs_hdrlen /* header length */ 1350 , bf->bf_state.bfs_atype /* Atheros packet type */ 1351 , bf->bf_state.bfs_txpower /* txpower */ 1352 , bf->bf_state.bfs_txrate0 1353 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1354 , bf->bf_state.bfs_keyix /* key cache index */ 1355 , bf->bf_state.bfs_txantenna /* antenna mode */ 1356 , bf->bf_state.bfs_txflags /* flags */ 1357 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1358 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1359 ); 1360 1361 /* 1362 * This will be overriden when the descriptor chain is written. 1363 */ 1364 bf->bf_lastds = ds; 1365 bf->bf_last = bf; 1366 1367 /* Set rate control and descriptor chain for this frame */ 1368 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1369 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1370 } 1371 1372 /* 1373 * Do a rate lookup. 1374 * 1375 * This performs a rate lookup for the given ath_buf only if it's required. 1376 * Non-data frames and raw frames don't require it. 1377 * 1378 * This populates the primary and MRR entries; MRR values are 1379 * then disabled later on if something requires it (eg RTS/CTS on 1380 * pre-11n chipsets. 1381 * 1382 * This needs to be done before the RTS/CTS fields are calculated 1383 * as they may depend upon the rate chosen. 1384 */ 1385 static void 1386 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1387 { 1388 uint8_t rate, rix; 1389 int try0; 1390 1391 if (! bf->bf_state.bfs_doratelookup) 1392 return; 1393 1394 /* Get rid of any previous state */ 1395 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1396 1397 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1398 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1399 bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1400 1401 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1402 bf->bf_state.bfs_rc[0].rix = rix; 1403 bf->bf_state.bfs_rc[0].ratecode = rate; 1404 bf->bf_state.bfs_rc[0].tries = try0; 1405 1406 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1407 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1408 bf->bf_state.bfs_rc); 1409 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1410 1411 sc->sc_txrix = rix; /* for LED blinking */ 1412 sc->sc_lastdatarix = rix; /* for fast frames */ 1413 bf->bf_state.bfs_try0 = try0; 1414 bf->bf_state.bfs_txrate0 = rate; 1415 } 1416 1417 /* 1418 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1419 */ 1420 static void 1421 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1422 struct ath_buf *bf) 1423 { 1424 struct ath_node *an = ATH_NODE(bf->bf_node); 1425 1426 ATH_TX_LOCK_ASSERT(sc); 1427 1428 if (an->clrdmask == 1) { 1429 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1430 an->clrdmask = 0; 1431 } 1432 } 1433 1434 /* 1435 * Return whether this frame should be software queued or 1436 * direct dispatched. 1437 * 1438 * When doing powersave, BAR frames should be queued but other management 1439 * frames should be directly sent. 1440 * 1441 * When not doing powersave, stick BAR frames into the hardware queue 1442 * so it goes out even though the queue is paused. 
1443 * 1444 * For now, management frames are also software queued by default. 1445 */ 1446 static int 1447 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, 1448 struct mbuf *m0, int *queue_to_head) 1449 { 1450 struct ieee80211_node *ni = &an->an_node; 1451 struct ieee80211_frame *wh; 1452 uint8_t type, subtype; 1453 1454 wh = mtod(m0, struct ieee80211_frame *); 1455 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1456 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1457 1458 (*queue_to_head) = 0; 1459 1460 /* If it's not in powersave - direct-dispatch BAR */ 1461 if ((ATH_NODE(ni)->an_is_powersave == 0) 1462 && type == IEEE80211_FC0_TYPE_CTL && 1463 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1464 DPRINTF(sc, ATH_DEBUG_SW_TX, 1465 "%s: BAR: TX'ing direct\n", __func__); 1466 return (0); 1467 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1468 && type == IEEE80211_FC0_TYPE_CTL && 1469 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1470 /* BAR TX whilst asleep; queue */ 1471 DPRINTF(sc, ATH_DEBUG_SW_TX, 1472 "%s: swq: TX'ing\n", __func__); 1473 (*queue_to_head) = 1; 1474 return (1); 1475 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1476 && (type == IEEE80211_FC0_TYPE_MGT || 1477 type == IEEE80211_FC0_TYPE_CTL)) { 1478 /* 1479 * Other control/mgmt frame; bypass software queuing 1480 * for now! 1481 */ 1482 DPRINTF(sc, ATH_DEBUG_XMIT, 1483 "%s: %6D: Node is asleep; sending mgmt " 1484 "(type=%d, subtype=%d)\n", 1485 __func__, ni->ni_macaddr, ":", type, subtype); 1486 return (0); 1487 } else { 1488 return (1); 1489 } 1490 } 1491 1492 1493 /* 1494 * Transmit the given frame to the hardware. 1495 * 1496 * The frame must already be setup; rate control must already have 1497 * been done. 1498 * 1499 * XXX since the TXQ lock is being held here (and I dislike holding 1500 * it for this long when not doing software aggregation), later on 1501 * break this function into "setup_normal" and "xmit_normal". The 1502 * lock only needs to be held for the ath_tx_handoff call. 1503 * 1504 * XXX we don't update the leak count here - if we're doing 1505 * direct frame dispatch, we need to be able to do it without 1506 * decrementing the leak count (eg multicast queue frames.) 1507 */ 1508 static void 1509 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1510 struct ath_buf *bf) 1511 { 1512 struct ath_node *an = ATH_NODE(bf->bf_node); 1513 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1514 1515 ATH_TX_LOCK_ASSERT(sc); 1516 1517 /* 1518 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1519 * set a completion handler however it doesn't (yet) properly 1520 * handle the strict ordering requirements needed for normal, 1521 * non-aggregate session frames. 1522 * 1523 * Once this is implemented, only set CLRDMASK like this for 1524 * frames that must go out - eg management/raw frames. 1525 */ 1526 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1527 1528 /* Setup the descriptor before handoff */ 1529 ath_tx_do_ratelookup(sc, bf); 1530 ath_tx_calc_duration(sc, bf); 1531 ath_tx_calc_protection(sc, bf); 1532 ath_tx_set_rtscts(sc, bf); 1533 ath_tx_rate_fill_rcflags(sc, bf); 1534 ath_tx_setds(sc, bf); 1535 1536 /* Track per-TID hardware queue depth correctly */ 1537 tid->hwq_depth++; 1538 1539 /* Assign the completion handler */ 1540 bf->bf_comp = ath_tx_normal_comp; 1541 1542 /* Hand off to hardware */ 1543 ath_tx_handoff(sc, txq, bf); 1544 } 1545 1546 /* 1547 * Do the basic frame setup stuff that's required before the frame 1548 * is added to a software queue. 
1549 * 1550 * All frames get mostly the same treatment and it's done once. 1551 * Retransmits fiddle with things like the rate control setup, 1552 * setting the retransmit bit in the packet; doing relevant DMA/bus 1553 * syncing and relinking it (back) into the hardware TX queue. 1554 * 1555 * Note that this may cause the mbuf to be reallocated, so 1556 * m0 may not be valid. 1557 */ 1558 static int 1559 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1560 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1561 { 1562 struct ieee80211vap *vap = ni->ni_vap; 1563 struct ath_hal *ah = sc->sc_ah; 1564 struct ifnet *ifp = sc->sc_ifp; 1565 struct ieee80211com *ic = ifp->if_l2com; 1566 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1567 int error, iswep, ismcast, isfrag, ismrr; 1568 int keyix, hdrlen, pktlen, try0 = 0; 1569 u_int8_t rix = 0, txrate = 0; 1570 struct ath_desc *ds; 1571 struct ieee80211_frame *wh; 1572 u_int subtype, flags; 1573 HAL_PKT_TYPE atype; 1574 const HAL_RATE_TABLE *rt; 1575 HAL_BOOL shortPreamble; 1576 struct ath_node *an; 1577 u_int pri; 1578 1579 /* 1580 * To ensure that both sequence numbers and the CCMP PN handling 1581 * is "correct", make sure that the relevant TID queue is locked. 1582 * Otherwise the CCMP PN and seqno may appear out of order, causing 1583 * re-ordered frames to have out of order CCMP PN's, resulting 1584 * in many, many frame drops. 1585 */ 1586 ATH_TX_LOCK_ASSERT(sc); 1587 1588 wh = mtod(m0, struct ieee80211_frame *); 1589 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 1590 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1591 isfrag = m0->m_flags & M_FRAG; 1592 hdrlen = ieee80211_anyhdrsize(wh); 1593 /* 1594 * Packet length must not include any 1595 * pad bytes; deduct them here. 1596 */ 1597 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1598 1599 /* Handle encryption twiddling if needed */ 1600 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1601 &pktlen, &keyix)) { 1602 ath_freetx(m0); 1603 return EIO; 1604 } 1605 1606 /* packet header may have moved, reset our local pointer */ 1607 wh = mtod(m0, struct ieee80211_frame *); 1608 1609 pktlen += IEEE80211_CRC_LEN; 1610 1611 /* 1612 * Load the DMA map so any coalescing is done. This 1613 * also calculates the number of descriptors we need. 1614 */ 1615 error = ath_tx_dmasetup(sc, bf, m0); 1616 if (error != 0) 1617 return error; 1618 bf->bf_node = ni; /* NB: held reference */ 1619 m0 = bf->bf_m; /* NB: may have changed */ 1620 wh = mtod(m0, struct ieee80211_frame *); 1621 1622 /* setup descriptors */ 1623 ds = bf->bf_desc; 1624 rt = sc->sc_currates; 1625 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1626 1627 /* 1628 * NB: the 802.11 layer marks whether or not we should 1629 * use short preamble based on the current mode and 1630 * negotiated parameters. 1631 */ 1632 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1633 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1634 shortPreamble = AH_TRUE; 1635 sc->sc_stats.ast_tx_shortpre++; 1636 } else { 1637 shortPreamble = AH_FALSE; 1638 } 1639 1640 an = ATH_NODE(ni); 1641 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1642 flags = 0; 1643 ismrr = 0; /* default no multi-rate retry*/ 1644 pri = M_WME_GETAC(m0); /* honor classification */ 1645 /* XXX use txparams instead of fixed values */ 1646 /* 1647 * Calculate Atheros packet type from IEEE80211 packet header, 1648 * setup for rate calculations, and select h/w transmit queue. 
1649 */ 1650 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1651 case IEEE80211_FC0_TYPE_MGT: 1652 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1653 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1654 atype = HAL_PKT_TYPE_BEACON; 1655 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1656 atype = HAL_PKT_TYPE_PROBE_RESP; 1657 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1658 atype = HAL_PKT_TYPE_ATIM; 1659 else 1660 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1661 rix = an->an_mgmtrix; 1662 txrate = rt->info[rix].rateCode; 1663 if (shortPreamble) 1664 txrate |= rt->info[rix].shortPreamble; 1665 try0 = ATH_TXMGTTRY; 1666 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1667 break; 1668 case IEEE80211_FC0_TYPE_CTL: 1669 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1670 rix = an->an_mgmtrix; 1671 txrate = rt->info[rix].rateCode; 1672 if (shortPreamble) 1673 txrate |= rt->info[rix].shortPreamble; 1674 try0 = ATH_TXMGTTRY; 1675 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1676 break; 1677 case IEEE80211_FC0_TYPE_DATA: 1678 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1679 /* 1680 * Data frames: multicast frames go out at a fixed rate, 1681 * EAPOL frames use the mgmt frame rate; otherwise consult 1682 * the rate control module for the rate to use. 1683 */ 1684 if (ismcast) { 1685 rix = an->an_mcastrix; 1686 txrate = rt->info[rix].rateCode; 1687 if (shortPreamble) 1688 txrate |= rt->info[rix].shortPreamble; 1689 try0 = 1; 1690 } else if (m0->m_flags & M_EAPOL) { 1691 /* XXX? maybe always use long preamble? */ 1692 rix = an->an_mgmtrix; 1693 txrate = rt->info[rix].rateCode; 1694 if (shortPreamble) 1695 txrate |= rt->info[rix].shortPreamble; 1696 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1697 } else { 1698 /* 1699 * Do rate lookup on each TX, rather than using 1700 * the hard-coded TX information decided here. 1701 */ 1702 ismrr = 1; 1703 bf->bf_state.bfs_doratelookup = 1; 1704 } 1705 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1706 flags |= HAL_TXDESC_NOACK; 1707 break; 1708 default: 1709 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 1710 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1711 /* XXX statistic */ 1712 /* XXX free tx dmamap */ 1713 ath_freetx(m0); 1714 return EIO; 1715 } 1716 1717 /* 1718 * There are two known scenarios where the frame AC doesn't match 1719 * what the destination TXQ is. 1720 * 1721 * + non-QoS frames (eg management?) that the net80211 stack has 1722 * assigned a higher AC to, but since it's a non-QoS TID, it's 1723 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1724 * It's quite possible that management frames should just be 1725 * direct dispatched to hardware rather than go via the software 1726 * queue; that should be investigated in the future. There are 1727 * some specific scenarios where this doesn't make sense, mostly 1728 * surrounding ADDBA request/response - hence why that is special 1729 * cased. 1730 * 1731 * + Multicast frames going into the VAP mcast queue. That shows up 1732 * as "TXQ 11". 1733 * 1734 * This driver should eventually support separate TID and TXQ locking, 1735 * allowing for arbitrary AC frames to appear on arbitrary software 1736 * queues, being queued to the "correct" hardware queue when needed. 
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames. Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}
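	/*
	 * Illustrative example (not in the original): with
	 * sc_txintrperiod == 5, four consecutive data frames carry no
	 * INTREQ and the fifth gets HAL_TXDESC_INTREQ, so the host
	 * takes roughly one TX interrupt per five frames instead of
	 * one per frame. Management frames bypass this entirely since
	 * they already forced INTREQ above.
	 */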
1831 */ 1832 bf->bf_state.bfs_rc[0].rix = rix; 1833 bf->bf_state.bfs_rc[0].tries = try0; 1834 bf->bf_state.bfs_rc[0].ratecode = txrate; 1835 1836 /* Store the decided rate index values away */ 1837 bf->bf_state.bfs_pktlen = pktlen; 1838 bf->bf_state.bfs_hdrlen = hdrlen; 1839 bf->bf_state.bfs_atype = atype; 1840 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1841 bf->bf_state.bfs_txrate0 = txrate; 1842 bf->bf_state.bfs_try0 = try0; 1843 bf->bf_state.bfs_keyix = keyix; 1844 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1845 bf->bf_state.bfs_txflags = flags; 1846 bf->bf_state.bfs_shpream = shortPreamble; 1847 1848 /* XXX this should be done in ath_tx_setrate() */ 1849 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1850 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1851 bf->bf_state.bfs_ctsduration = 0; 1852 bf->bf_state.bfs_ismrr = ismrr; 1853 1854 return 0; 1855 } 1856 1857 /* 1858 * Queue a frame to the hardware or software queue. 1859 * 1860 * This can be called by the net80211 code. 1861 * 1862 * XXX what about locking? Or, push the seqno assign into the 1863 * XXX aggregate scheduler so it's serialised? 1864 * 1865 * XXX When sending management frames via ath_raw_xmit(), 1866 * should CLRDMASK be set unconditionally? 1867 */ 1868 int 1869 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1870 struct ath_buf *bf, struct mbuf *m0) 1871 { 1872 struct ieee80211vap *vap = ni->ni_vap; 1873 struct ath_vap *avp = ATH_VAP(vap); 1874 int r = 0; 1875 u_int pri; 1876 int tid; 1877 struct ath_txq *txq; 1878 int ismcast; 1879 const struct ieee80211_frame *wh; 1880 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1881 ieee80211_seq seqno; 1882 uint8_t type, subtype; 1883 int queue_to_head; 1884 1885 ATH_TX_LOCK_ASSERT(sc); 1886 1887 /* 1888 * Determine the target hardware queue. 1889 * 1890 * For multicast frames, the txq gets overridden appropriately 1891 * depending upon the state of PS. 1892 * 1893 * For any other frame, we do a TID/QoS lookup inside the frame 1894 * to see what the TID should be. If it's a non-QoS frame, the 1895 * AC and TID are overridden. The TID/TXQ code assumes the 1896 * TID is on a predictable hardware TXQ, so we don't support 1897 * having a node TID queued to multiple hardware TXQs. 1898 * This may change in the future but would require some locking 1899 * fudgery. 1900 */ 1901 pri = ath_tx_getac(sc, m0); 1902 tid = ath_tx_gettid(sc, m0); 1903 1904 txq = sc->sc_ac2q[pri]; 1905 wh = mtod(m0, struct ieee80211_frame *); 1906 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1907 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1908 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1909 1910 /* 1911 * Enforce how deep the multicast queue can grow. 1912 * 1913 * XXX duplicated in ath_raw_xmit(). 1914 */ 1915 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1916 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1917 > sc->sc_txq_mcastq_maxdepth) { 1918 sc->sc_stats.ast_tx_mcastq_overflow++; 1919 m_freem(m0); 1920 return (ENOBUFS); 1921 } 1922 } 1923 1924 /* 1925 * Enforce how deep the unicast queue can grow. 1926 * 1927 * If the node is in power save then we don't want 1928 * the software queue to grow too deep, or a node may 1929 * end up consuming all of the ath_buf entries. 1930 * 1931 * For now, only do this for DATA frames. 1932 * 1933 * We will want to cap how many management/control 1934 * frames get punted to the software queue so it doesn't 1935 * fill up. But the correct solution isn't yet obvious.
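 * (One sketch, mirroring the data-frame check above: compare
 * an_swq_depth against a separate, hypothetical management-frame
 * cap before software queueing anything that isn't being
 * direct-dispatched.)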
1936 * In any case, this check should at least let frames pass 1937 * that we are direct-dispatching. 1938 * 1939 * XXX TODO: duplicate this to the raw xmit path! 1940 */ 1941 if (type == IEEE80211_FC0_TYPE_DATA && 1942 ATH_NODE(ni)->an_is_powersave && 1943 ATH_NODE(ni)->an_swq_depth > 1944 sc->sc_txq_node_psq_maxdepth) { 1945 sc->sc_stats.ast_tx_node_psq_overflow++; 1946 m_freem(m0); 1947 return (ENOBUFS); 1948 } 1949 1950 /* A-MPDU TX */ 1951 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1952 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1953 is_ampdu = is_ampdu_tx | is_ampdu_pending; 1954 1955 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 1956 __func__, tid, pri, is_ampdu); 1957 1958 /* Set local packet state, used to queue packets to hardware */ 1959 bf->bf_state.bfs_tid = tid; 1960 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 1961 bf->bf_state.bfs_pri = pri; 1962 1963 #if 1 1964 /* 1965 * When servicing one or more stations in power-save mode, 1966 * or if there is some mcast data waiting on the mcast 1967 * queue (to prevent out of order delivery), multicast frames 1968 * must be buffered until after the beacon. 1969 * 1970 * TODO: we should lock the mcastq before we check the length. 1971 */ 1972 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 1973 txq = &avp->av_mcastq; 1974 /* 1975 * Mark the frame as eventually belonging on the CAB 1976 * queue, so the descriptor setup functions will 1977 * correctly initialise the descriptor 'qcuId' field. 1978 */ 1979 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; 1980 } 1981 #endif 1982 1983 /* Do the generic frame setup */ 1984 /* XXX should just bzero the bf_state? */ 1985 bf->bf_state.bfs_dobaw = 0; 1986 1987 /* A-MPDU TX? Manually set sequence number */ 1988 /* 1989 * Don't do it whilst pending; the net80211 layer still 1990 * assigns them. 1991 */ 1992 if (is_ampdu_tx) { 1993 /* 1994 * Always call; this function will 1995 * handle making sure that null data frames 1996 * don't get a sequence number from the current 1997 * TID and thus mess with the BAW. 1998 */ 1999 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 2000 2001 /* 2002 * Don't add QoS NULL frames to the BAW. 2003 */ 2004 if (IEEE80211_QOS_HAS_SEQ(wh) && 2005 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2006 bf->bf_state.bfs_dobaw = 1; 2007 } 2008 } 2009 2010 /* 2011 * If needed, the sequence number has been assigned. 2012 * Squirrel it away somewhere easy to get to. 2013 */ 2014 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2015 2016 /* Is A-MPDU pending? Fetch the seqno and print it out */ 2017 if (is_ampdu_pending) 2018 DPRINTF(sc, ATH_DEBUG_SW_TX, 2019 "%s: tid %d: ampdu pending, seqno %d\n", 2020 __func__, tid, M_SEQNO_GET(m0)); 2021 2022 /* This also sets up the DMA map */ 2023 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2024 2025 if (r != 0) 2026 goto done; 2027 2028 /* At this point m0 could have changed! */ 2029 m0 = bf->bf_m; 2030 2031 #if 1 2032 /* 2033 * If it's a multicast frame, do a direct-dispatch to the 2034 * destination hardware queue. Don't bother software 2035 * queuing it. 2036 */ 2037 /* 2038 * If it's a BAR frame, do a direct dispatch to the 2039 * destination hardware queue. Don't bother software 2040 * queuing it, as the TID will now be paused. 2041 * Sending a BAR frame can occur from the net80211 txa timer 2042 * (ie, retries) or from the ath txtask (completion call.)
2043 * It queues directly to hardware because the TID is paused 2044 * at this point (and won't be unpaused until the BAR has 2045 * either been TXed successfully or max retries has been 2046 * reached.) 2047 */ 2048 /* 2049 * Until things are better debugged - if this node is asleep 2050 * and we're sending it a non-BAR frame, direct dispatch it. 2051 * Why? Because we need to figure out what's actually being 2052 * sent - eg, during reassociation/reauthentication after 2053 * the node (last) disappeared whilst asleep, the driver should 2054 * have unpaused/unsleep'ed the node. So until that is 2055 * sorted out, use this workaround. 2056 */ 2057 if (txq == &avp->av_mcastq) { 2058 DPRINTF(sc, ATH_DEBUG_SW_TX, 2059 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2060 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2061 ath_tx_xmit_normal(sc, txq, bf); 2062 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2063 &queue_to_head)) { 2064 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2065 } else { 2066 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2067 ath_tx_xmit_normal(sc, txq, bf); 2068 } 2069 #else 2070 /* 2071 * For now, since there's no software queue, 2072 * direct-dispatch to the hardware. 2073 */ 2074 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2075 /* 2076 * Update the current leak count if 2077 * we're leaking frames; and set the 2078 * MORE flag as appropriate. 2079 */ 2080 ath_tx_leak_count_update(sc, tid, bf); 2081 ath_tx_xmit_normal(sc, txq, bf); 2082 #endif 2083 done: 2084 return 0; 2085 } 2086 2087 static int 2088 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2089 struct ath_buf *bf, struct mbuf *m0, 2090 const struct ieee80211_bpf_params *params) 2091 { 2092 struct ifnet *ifp = sc->sc_ifp; 2093 struct ieee80211com *ic = ifp->if_l2com; 2094 struct ath_hal *ah = sc->sc_ah; 2095 struct ieee80211vap *vap = ni->ni_vap; 2096 int error, ismcast, ismrr; 2097 int keyix, hdrlen, pktlen, try0, txantenna; 2098 u_int8_t rix, txrate; 2099 struct ieee80211_frame *wh; 2100 u_int flags; 2101 HAL_PKT_TYPE atype; 2102 const HAL_RATE_TABLE *rt; 2103 struct ath_desc *ds; 2104 u_int pri; 2105 int o_tid = -1; 2106 int do_override; 2107 uint8_t type, subtype; 2108 int queue_to_head; 2109 2110 ATH_TX_LOCK_ASSERT(sc); 2111 2112 wh = mtod(m0, struct ieee80211_frame *); 2113 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2114 hdrlen = ieee80211_anyhdrsize(wh); 2115 /* 2116 * Packet length must not include any 2117 * pad bytes; deduct them here. 2118 */ 2119 /* XXX honor IEEE80211_BPF_DATAPAD */ 2120 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2121 2122 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2123 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2124 2125 ATH_KTR(sc, ATH_KTR_TX, 2, 2126 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2127 2128 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2129 __func__, ismcast); 2130 2131 pri = params->ibp_pri & 3; 2132 /* Override pri if the frame isn't a QoS one */ 2133 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2134 pri = ath_tx_getac(sc, m0); 2135 2136 /* XXX If it's an ADDBA, override the correct queue */ 2137 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2138 2139 /* Map ADDBA to the correct priority */ 2140 if (do_override) { 2141 #if 0 2142 DPRINTF(sc, ATH_DEBUG_XMIT, 2143 "%s: overriding tid %d pri %d -> %d\n", 2144 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2145 #endif 2146 pri = TID_TO_WME_AC(o_tid); 2147 } 2148 2149 /* Handle encryption twiddling if needed */ 2150 if (! 
ath_tx_tag_crypto(sc, ni, 2151 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2152 &hdrlen, &pktlen, &keyix)) { 2153 ath_freetx(m0); 2154 return EIO; 2155 } 2156 /* packet header may have moved, reset our local pointer */ 2157 wh = mtod(m0, struct ieee80211_frame *); 2158 2159 /* Do the generic frame setup */ 2160 /* XXX should just bzero the bf_state? */ 2161 bf->bf_state.bfs_dobaw = 0; 2162 2163 error = ath_tx_dmasetup(sc, bf, m0); 2164 if (error != 0) 2165 return error; 2166 m0 = bf->bf_m; /* NB: may have changed */ 2167 wh = mtod(m0, struct ieee80211_frame *); 2168 bf->bf_node = ni; /* NB: held reference */ 2169 2170 /* Always enable CLRDMASK for raw frames for now.. */ 2171 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2172 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2173 if (params->ibp_flags & IEEE80211_BPF_RTS) 2174 flags |= HAL_TXDESC_RTSENA; 2175 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2176 /* XXX assume 11g/11n protection? */ 2177 bf->bf_state.bfs_doprot = 1; 2178 flags |= HAL_TXDESC_CTSENA; 2179 } 2180 /* XXX leave ismcast to injector? */ 2181 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2182 flags |= HAL_TXDESC_NOACK; 2183 2184 rt = sc->sc_currates; 2185 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2186 rix = ath_tx_findrix(sc, params->ibp_rate0); 2187 txrate = rt->info[rix].rateCode; 2188 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2189 txrate |= rt->info[rix].shortPreamble; 2190 sc->sc_txrix = rix; 2191 try0 = params->ibp_try0; 2192 ismrr = (params->ibp_try1 != 0); 2193 txantenna = params->ibp_pri >> 2; 2194 if (txantenna == 0) /* XXX? */ 2195 txantenna = sc->sc_txantenna; 2196 2197 /* 2198 * Since ctsrate is fixed, store it away for later 2199 * use when the descriptor fields are being set. 2200 */ 2201 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2202 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2203 2204 /* 2205 * NB: we mark all packets as type PSPOLL so the h/w won't 2206 * set the sequence number, duration, etc. 2207 */ 2208 atype = HAL_PKT_TYPE_PSPOLL; 2209 2210 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2211 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2212 sc->sc_hwmap[rix].ieeerate, -1); 2213 2214 if (ieee80211_radiotap_active_vap(vap)) { 2215 u_int64_t tsf = ath_hal_gettsf64(ah); 2216 2217 sc->sc_tx_th.wt_tsf = htole64(tsf); 2218 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2219 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2220 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2221 if (m0->m_flags & M_FRAG) 2222 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2223 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2224 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2225 ieee80211_get_node_txpower(ni)); 2226 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2227 2228 ieee80211_radiotap_tx(vap, m0); 2229 } 2230 2231 /* 2232 * Formulate first tx descriptor with tx controls. 2233 */ 2234 ds = bf->bf_desc; 2235 /* XXX check return value? */ 2236 2237 /* Store the decided rate index values away */ 2238 bf->bf_state.bfs_pktlen = pktlen; 2239 bf->bf_state.bfs_hdrlen = hdrlen; 2240 bf->bf_state.bfs_atype = atype; 2241 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2242 ieee80211_get_node_txpower(ni)); 2243 bf->bf_state.bfs_txrate0 = txrate; 2244 bf->bf_state.bfs_try0 = try0; 2245 bf->bf_state.bfs_keyix = keyix; 2246 bf->bf_state.bfs_txantenna = txantenna; 2247 bf->bf_state.bfs_txflags = flags; 2248 bf->bf_state.bfs_shpream = 2249 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2250 2251 /* Set local packet state, used to queue packets to hardware */ 2252 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2253 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2254 bf->bf_state.bfs_pri = pri; 2255 2256 /* XXX this should be done in ath_tx_setrate() */ 2257 bf->bf_state.bfs_ctsrate = 0; 2258 bf->bf_state.bfs_ctsduration = 0; 2259 bf->bf_state.bfs_ismrr = ismrr; 2260 2261 /* Blank the legacy rate array */ 2262 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2263 2264 bf->bf_state.bfs_rc[0].rix = 2265 ath_tx_findrix(sc, params->ibp_rate0); 2266 bf->bf_state.bfs_rc[0].tries = try0; 2267 bf->bf_state.bfs_rc[0].ratecode = txrate; 2268 2269 if (ismrr) { 2270 int rix; 2271 2272 rix = ath_tx_findrix(sc, params->ibp_rate1); 2273 bf->bf_state.bfs_rc[1].rix = rix; 2274 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2275 2276 rix = ath_tx_findrix(sc, params->ibp_rate2); 2277 bf->bf_state.bfs_rc[2].rix = rix; 2278 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2279 2280 rix = ath_tx_findrix(sc, params->ibp_rate3); 2281 bf->bf_state.bfs_rc[3].rix = rix; 2282 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2283 } 2284 /* 2285 * All the required rate control decisions have been made; 2286 * fill in the rc flags. 2287 */ 2288 ath_tx_rate_fill_rcflags(sc, bf); 2289 2290 /* NB: no buffered multicast in power save support */ 2291 2292 /* 2293 * If we're overriding the ADDBA destination, dump directly 2294 * into the hardware queue, right after any pending 2295 * frames to that node are. 2296 */ 2297 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2298 __func__, do_override); 2299 2300 #if 1 2301 /* 2302 * Put ADDBA frames in the right place in the right TID/HWQ. 2303 */ 2304 if (do_override) { 2305 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2306 /* 2307 * XXX if it's ADDBA frames, should we be leaking 2308 * them out via the frame leak method? 2309 * XXX for now let's not risk it; but we may wish 2310 * to investigate this later. 2311 */ 2312 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2313 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2314 &queue_to_head)) { 2315 /* Queue to software queue */ 2316 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2317 } else { 2318 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2319 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2320 } 2321 #else 2322 /* Direct-dispatch to the hardware */ 2323 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2324 /* 2325 * Update the current leak count if 2326 * we're leaking frames; and set the 2327 * MORE flag as appropriate. 2328 */ 2329 ath_tx_leak_count_update(sc, tid, bf); 2330 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2331 #endif 2332 return 0; 2333 } 2334 2335 /* 2336 * Send a raw frame. 2337 * 2338 * This can be called by net80211.
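 *
 * Two paths are taken below (a summary of the existing code, not new
 * behaviour): with params == NULL the frame contents are interpreted
 * just like the normal TX path via ath_tx_start(); otherwise the
 * caller-supplied ieee80211_bpf_params drive the rate, retry and
 * antenna selection via ath_tx_raw_start().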
2339 */ 2340 int 2341 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2342 const struct ieee80211_bpf_params *params) 2343 { 2344 struct ieee80211com *ic = ni->ni_ic; 2345 struct ifnet *ifp = ic->ic_ifp; 2346 struct ath_softc *sc = ifp->if_softc; 2347 struct ath_buf *bf; 2348 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2349 int error = 0; 2350 2351 ATH_PCU_LOCK(sc); 2352 if (sc->sc_inreset_cnt > 0) { 2353 DPRINTF(sc, ATH_DEBUG_XMIT, 2354 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2355 error = EIO; 2356 ATH_PCU_UNLOCK(sc); 2357 goto bad0; 2358 } 2359 sc->sc_txstart_cnt++; 2360 ATH_PCU_UNLOCK(sc); 2361 2362 ATH_TX_LOCK(sc); 2363 2364 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 2365 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2366 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 2367 "!running" : "invalid"); 2368 m_freem(m); 2369 error = ENETDOWN; 2370 goto bad; 2371 } 2372 2373 /* 2374 * Enforce how deep the multicast queue can grow. 2375 * 2376 * XXX duplicated in ath_tx_start(). 2377 */ 2378 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2379 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2380 > sc->sc_txq_mcastq_maxdepth) { 2381 sc->sc_stats.ast_tx_mcastq_overflow++; 2382 error = ENOBUFS; 2383 } 2384 2385 if (error != 0) { 2386 m_freem(m); 2387 goto bad; 2388 } 2389 } 2390 2391 /* 2392 * Grab a TX buffer and associated resources. 2393 */ 2394 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2395 if (bf == NULL) { 2396 sc->sc_stats.ast_tx_nobuf++; 2397 m_freem(m); 2398 error = ENOBUFS; 2399 goto bad; 2400 } 2401 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2402 m, params, bf); 2403 2404 if (params == NULL) { 2405 /* 2406 * Legacy path; interpret frame contents to decide 2407 * precisely how to send the frame. 2408 */ 2409 if (ath_tx_start(sc, ni, bf, m)) { 2410 error = EIO; /* XXX */ 2411 goto bad2; 2412 } 2413 } else { 2414 /* 2415 * Caller supplied explicit parameters to use in 2416 * sending the frame. 2417 */ 2418 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2419 error = EIO; /* XXX */ 2420 goto bad2; 2421 } 2422 } 2423 sc->sc_wd_timer = 5; 2424 ifp->if_opackets++; 2425 sc->sc_stats.ast_tx_raw++; 2426 2427 /* 2428 * Update the TIM - if there's anything queued to the 2429 * software queue and power save is enabled, we should 2430 * set the TIM. 2431 */ 2432 ath_tx_update_tim(sc, ni, 1); 2433 2434 ATH_TX_UNLOCK(sc); 2435 2436 ATH_PCU_LOCK(sc); 2437 sc->sc_txstart_cnt--; 2438 ATH_PCU_UNLOCK(sc); 2439 2440 return 0; 2441 bad2: 2442 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2443 "bf=%p", 2444 m, 2445 params, 2446 bf); 2447 ATH_TXBUF_LOCK(sc); 2448 ath_returnbuf_head(sc, bf); 2449 ATH_TXBUF_UNLOCK(sc); 2450 bad: 2451 2452 ATH_TX_UNLOCK(sc); 2453 2454 ATH_PCU_LOCK(sc); 2455 sc->sc_txstart_cnt--; 2456 ATH_PCU_UNLOCK(sc); 2457 bad0: 2458 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2459 m, params); 2460 ifp->if_oerrors++; 2461 sc->sc_stats.ast_tx_raw_fail++; 2462 ieee80211_free_node(ni); 2463 2464 return error; 2465 } 2466 2467 /* Some helper functions */ 2468 2469 /* 2470 * ADDBA (and potentially others) need to be placed in the same 2471 * hardware queue as the TID/node it's relating to. This is so 2472 * it goes out after any pending non-aggregate frames to the 2473 * same node/TID. 2474 * 2475 * If this isn't done, the ADDBA can go out before the frames 2476 * queued in hardware. 
Even though these frames have a sequence 2477 * number -earlier- than the ADDBA can be transmitted (but 2478 * no frames whose sequence numbers are after the ADDBA should 2479 * be!) they'll arrive after the ADDBA - and the receiving end 2480 * will simply drop them as being out of the BAW. 2481 * 2482 * The frames can't be appended to the TID software queue - they'll 2483 * never be sent out. So these frames have to be directly 2484 * dispatched to the hardware, rather than queued in software. 2485 * So if this function returns true, the TXQ has to be 2486 * overridden and it has to be directly dispatched. 2487 * 2488 * It's a dirty hack, but someone's gotta do it. 2489 */ 2490 2491 /* 2492 * XXX doesn't belong here! 2493 */ 2494 static int 2495 ieee80211_is_action(struct ieee80211_frame *wh) 2496 { 2497 /* Type: Management frame? */ 2498 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2499 IEEE80211_FC0_TYPE_MGT) 2500 return 0; 2501 2502 /* Subtype: Action frame? */ 2503 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2504 IEEE80211_FC0_SUBTYPE_ACTION) 2505 return 0; 2506 2507 return 1; 2508 } 2509 2510 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2511 /* 2512 * Return an alternate TID for ADDBA request frames. 2513 * 2514 * Yes, this likely should be done in the net80211 layer. 2515 */ 2516 static int 2517 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2518 struct ieee80211_node *ni, 2519 struct mbuf *m0, int *tid) 2520 { 2521 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2522 struct ieee80211_action_ba_addbarequest *ia; 2523 uint8_t *frm; 2524 uint16_t baparamset; 2525 2526 /* Not action frame? Bail */ 2527 if (! ieee80211_is_action(wh)) 2528 return 0; 2529 2530 /* XXX Not needed for frames we send? */ 2531 #if 0 2532 /* Correct length? */ 2533 if (! ieee80211_parse_action(ni, m)) 2534 return 0; 2535 #endif 2536 2537 /* Extract out action frame */ 2538 frm = (u_int8_t *)&wh[1]; 2539 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2540 2541 /* Not ADDBA? Bail */ 2542 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2543 return 0; 2544 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2545 return 0; 2546 2547 /* Extract TID, return it */ 2548 baparamset = le16toh(ia->rq_baparamset); 2549 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2550 2551 return 1; 2552 } 2553 #undef MS 2554 2555 /* Per-node software queue operations */ 2556 2557 /* 2558 * Add the current packet to the given BAW. 2559 * It is assumed that the current packet 2560 * 2561 * + fits inside the BAW; 2562 * + already has had a sequence number allocated. 2563 * 2564 * Since the BAW status may be modified by both the ath task and 2565 * the net80211/ifnet contexts, the TID must be locked. 2566 */ 2567 void 2568 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2569 struct ath_tid *tid, struct ath_buf *bf) 2570 { 2571 int index, cindex; 2572 struct ieee80211_tx_ampdu *tap; 2573 2574 ATH_TX_LOCK_ASSERT(sc); 2575 2576 if (bf->bf_state.bfs_isretried) 2577 return; 2578 2579 tap = ath_tx_get_tx_tid(an, tid->tid); 2580 2581 if (! bf->bf_state.bfs_dobaw) { 2582 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2583 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2584 __func__, SEQNO(bf->bf_state.bfs_seqno), 2585 tap->txa_start, tap->txa_wnd); 2586 } 2587 2588 if (bf->bf_state.bfs_addedbaw) 2589 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2590 "%s: re-added? 
tid=%d, seqno %d; window %d:%d; " 2591 "baw head=%d tail=%d\n", 2592 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2593 tap->txa_start, tap->txa_wnd, tid->baw_head, 2594 tid->baw_tail); 2595 2596 /* 2597 * Verify that the given sequence number is not outside of the 2598 * BAW. Complain loudly if that's the case. 2599 */ 2600 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2601 SEQNO(bf->bf_state.bfs_seqno))) { 2602 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2603 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2604 "baw head=%d tail=%d\n", 2605 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2606 tap->txa_start, tap->txa_wnd, tid->baw_head, 2607 tid->baw_tail); 2608 } 2609 2610 /* 2611 * ni->ni_txseqs[] is the currently allocated seqno. 2612 * The txa state contains the current baw start. 2613 */ 2614 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2615 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2616 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2617 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2618 "baw head=%d tail=%d\n", 2619 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2620 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2621 tid->baw_tail); 2622 2623 2624 #if 0 2625 assert(tid->tx_buf[cindex] == NULL); 2626 #endif 2627 if (tid->tx_buf[cindex] != NULL) { 2628 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2629 "%s: ba packet dup (index=%d, cindex=%d, " 2630 "head=%d, tail=%d)\n", 2631 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2632 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2633 "%s: BA bf: %p; seqno=%d; new bf: %p; seqno=%d\n", 2634 __func__, 2635 tid->tx_buf[cindex], 2636 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2637 bf, 2638 SEQNO(bf->bf_state.bfs_seqno) 2639 ); 2640 } 2641 tid->tx_buf[cindex] = bf; 2642 2643 if (index >= ((tid->baw_tail - tid->baw_head) & 2644 (ATH_TID_MAX_BUFS - 1))) { 2645 tid->baw_tail = cindex; 2646 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2647 } 2648 } 2649 2650 /* 2651 * Flip the BAW buffer entry over from the existing one to the new one. 2652 * 2653 * When software retransmitting a (sub-)frame, it is entirely possible that 2654 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2655 * In that instance the buffer is cloned and the new buffer is used for 2656 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2657 * tracking array to maintain consistency. 2658 */ 2659 static void 2660 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2661 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2662 { 2663 int index, cindex; 2664 struct ieee80211_tx_ampdu *tap; 2665 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2666 2667 ATH_TX_LOCK_ASSERT(sc); 2668 2669 tap = ath_tx_get_tx_tid(an, tid->tid); 2670 index = ATH_BA_INDEX(tap->txa_start, seqno); 2671 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2672 2673 /* 2674 * Just warn for now; if it happens then we should find out 2675 * about it. It's highly likely the aggregation session will 2676 * soon hang.
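 *
 * Worked example (a sketch, assuming ATH_BA_INDEX() is the usual
 * "seqno minus txa_start, modulo the sequence space" helper): with
 * txa_start = 100, an old seqno of 103 and baw_head = 5, index is 3,
 * so cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1) = 8 and tx_buf[8] is
 * repointed from the busy buffer to its clone.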
2677 */ 2678 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2679 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2680 "%s: retransmitted buffer" 2681 " has mismatched seqnos, BA session may hang.\n", 2682 __func__); 2683 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2684 "%s: old seqno=%d, new_seqno=%d\n", __func__, 2685 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); 2686 } 2687 2688 if (tid->tx_buf[cindex] != old_bf) { 2689 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2690 "%s: ath_buf pointer incorrect; " 2691 "BA session may hang.\n", __func__); 2692 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2693 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); 2694 } 2695 2696 tid->tx_buf[cindex] = new_bf; 2697 } 2698 2699 /* 2700 * seq_start - left edge of BAW 2701 * seq_next - current/next sequence number to allocate 2702 * 2703 * Since the BAW status may be modified by both the ath task and 2704 * the net80211/ifnet contexts, the TID must be locked. 2705 */ 2706 static void 2707 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2708 struct ath_tid *tid, const struct ath_buf *bf) 2709 { 2710 int index, cindex; 2711 struct ieee80211_tx_ampdu *tap; 2712 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2713 2714 ATH_TX_LOCK_ASSERT(sc); 2715 2716 tap = ath_tx_get_tx_tid(an, tid->tid); 2717 index = ATH_BA_INDEX(tap->txa_start, seqno); 2718 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2719 2720 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2721 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2722 "baw head=%d, tail=%d\n", 2723 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2724 cindex, tid->baw_head, tid->baw_tail); 2725 2726 /* 2727 * If this occurs then we have a big problem - something else 2728 * has slid tap->txa_start along without updating the BAW 2729 * tracking start/end pointers. Thus the TX BAW state is now 2730 * completely busted. 2731 * 2732 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2733 * it's quite possible that a cloned buffer is making its way 2734 * here and causing it to fire off. Disable TDMA for now. 2735 */ 2736 if (tid->tx_buf[cindex] != bf) { 2737 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2738 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2739 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 2740 tid->tx_buf[cindex], 2741 (tid->tx_buf[cindex] != NULL) ? 2742 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); 2743 } 2744 2745 tid->tx_buf[cindex] = NULL; 2746 2747 while (tid->baw_head != tid->baw_tail && 2748 !tid->tx_buf[tid->baw_head]) { 2749 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2750 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2751 } 2752 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2753 "%s: baw is now %d:%d, baw head=%d\n", 2754 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); 2755 } 2756 2757 static void 2758 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, 2759 struct ath_buf *bf) 2760 { 2761 struct ieee80211_frame *wh; 2762 2763 ATH_TX_LOCK_ASSERT(sc); 2764 2765 if (tid->an->an_leak_count > 0) { 2766 wh = mtod(bf->bf_m, struct ieee80211_frame *); 2767 2768 /* 2769 * Update MORE based on the software/net80211 queue states.
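 *
 * Concretely (a sketch of the logic below): MORE_DATA stays set while
 * either the net80211 power save queue (an_stack_psq) or the driver
 * software queue (an_swq_depth) still holds frames for this node, so
 * a PS-POLLing station knows to keep polling; it's cleared on the
 * last leaked frame.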
2770 */ 2771 if ((tid->an->an_stack_psq > 0) 2772 || (tid->an->an_swq_depth > 0)) 2773 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 2774 else 2775 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; 2776 2777 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2778 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2779 __func__, 2780 tid->an->an_node.ni_macaddr, 2781 ":", 2782 tid->an->an_leak_count, 2783 tid->an->an_stack_psq, 2784 tid->an->an_swq_depth, 2785 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2786 2787 /* 2788 * Re-sync the underlying buffer. 2789 */ 2790 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2791 BUS_DMASYNC_PREWRITE); 2792 2793 tid->an->an_leak_count--; 2794 } 2795 } 2796 2797 static int 2798 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) 2799 { 2800 2801 ATH_TX_LOCK_ASSERT(sc); 2802 2803 if (tid->an->an_leak_count > 0) { 2804 return (1); 2805 } 2806 if (tid->paused) 2807 return (0); 2808 return (1); 2809 } 2810 2811 /* 2812 * Mark the current node/TID as ready to TX. 2813 * 2814 * This is done to make it easy for the software scheduler to 2815 * find which nodes have data to send. 2816 * 2817 * The TXQ lock must be held. 2818 */ 2819 void 2820 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2821 { 2822 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2823 2824 ATH_TX_LOCK_ASSERT(sc); 2825 2826 /* 2827 * If we are leaking out a frame to this destination 2828 * for PS-POLL, ensure that we allow scheduling to 2829 * occur. 2830 */ 2831 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 2832 return; /* paused, can't schedule yet */ 2833 2834 if (tid->sched) 2835 return; /* already scheduled */ 2836 2837 tid->sched = 1; 2838 2839 #if 0 2840 /* 2841 * If this is a sleeping node we're leaking to, give 2842 * it a higher priority. This is so bad for QoS it hurts. 2843 */ 2844 if (tid->an->an_leak_count) { 2845 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); 2846 } else { 2847 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2848 } 2849 #endif 2850 2851 /* 2852 * We can't do the above - it'll confuse the TXQ software 2853 * scheduler which will keep checking the _head_ TID 2854 * in the list to see if it has traffic. If we queue 2855 * a TID to the head of the list and it doesn't transmit, 2856 * we'll check it again. 2857 * 2858 * So, get the rest of this leaking-frames support working 2859 * reliably first and _then_ optimise it so they're 2860 * pushed out in front of any other pending software 2861 * queued nodes. 2862 */ 2863 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2864 } 2865 2866 /* 2867 * Mark the current node as no longer needing to be polled for 2868 * TX packets. 2869 * 2870 * The TXQ lock must be held. 2871 */ 2872 static void 2873 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2874 { 2875 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2876 2877 ATH_TX_LOCK_ASSERT(sc); 2878 2879 if (tid->sched == 0) 2880 return; 2881 2882 tid->sched = 0; 2883 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2884 } 2885 2886 /* 2887 * Assign a sequence number manually to the given frame. 2888 * 2889 * This should only be called for A-MPDU TX frames.
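 *
 * Sketch of the assignment below: QoS data frames consume
 * ni->ni_txseqs[tid] and increment it modulo IEEE80211_SEQ_RANGE;
 * QoS NULL frames instead draw from ni_txseqs[IEEE80211_NONQOS_TID]
 * so they never occupy a slot in the TID's BAW.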
2890 */ 2891 static ieee80211_seq 2892 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2893 struct ath_buf *bf, struct mbuf *m0) 2894 { 2895 struct ieee80211_frame *wh; 2896 int tid, pri; 2897 ieee80211_seq seqno; 2898 uint8_t subtype; 2899 2900 /* TID lookup */ 2901 wh = mtod(m0, struct ieee80211_frame *); 2902 pri = M_WME_GETAC(m0); /* honor classification */ 2903 tid = WME_AC_TO_TID(pri); 2904 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2905 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2906 2907 /* XXX Is it a control frame? Ignore */ 2908 2909 /* Does the packet require a sequence number? */ 2910 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2911 return -1; 2912 2913 ATH_TX_LOCK_ASSERT(sc); 2914 2915 /* 2916 * Is it a QOS NULL Data frame? Give it a sequence number from 2917 * the default TID (IEEE80211_NONQOS_TID.) 2918 * 2919 * The RX path of everything I've looked at doesn't include the NULL 2920 * data frame sequence number in the aggregation state updates, so 2921 * assigning it a sequence number there will cause a BAW hole on the 2922 * RX side. 2923 */ 2924 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2925 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2926 /* XXX no locking for this TID? This is a bit of a problem. */ 2927 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2928 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2929 } else { 2930 /* Manually assign sequence number */ 2931 seqno = ni->ni_txseqs[tid]; 2932 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2933 } 2934 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2935 M_SEQNO_SET(m0, seqno); 2936 2937 /* Return so caller can do something with it if needed */ 2938 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2939 return seqno; 2940 } 2941 2942 /* 2943 * Attempt to direct dispatch an aggregate frame to hardware. 2944 * If the frame is out of BAW, queue. 2945 * Otherwise, schedule it as a single frame. 2946 */ 2947 static void 2948 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2949 struct ath_txq *txq, struct ath_buf *bf) 2950 { 2951 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2952 struct ieee80211_tx_ampdu *tap; 2953 2954 ATH_TX_LOCK_ASSERT(sc); 2955 2956 tap = ath_tx_get_tx_tid(an, tid->tid); 2957 2958 /* paused? queue */ 2959 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2960 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2961 /* XXX don't sched - we're paused! */ 2962 return; 2963 } 2964 2965 /* outside baw? queue */ 2966 if (bf->bf_state.bfs_dobaw && 2967 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2968 SEQNO(bf->bf_state.bfs_seqno)))) { 2969 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2970 ath_tx_tid_sched(sc, tid); 2971 return; 2972 } 2973 2974 /* 2975 * This is a temporary check and should be removed once 2976 * all the relevant code paths have been fixed. 2977 * 2978 * During aggregate retries, it's possible that the head 2979 * frame will fail (which has the bfs_aggr and bfs_nframes 2980 * fields set for said aggregate) and will be retried as 2981 * a single frame. In this instance, the values should 2982 * be reset or the completion code will get upset with you. 
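 * (Concretely: bfs_aggr is cleared and bfs_nframes forced back to 1
 * just below, before the buffer is handed off as a single frame.)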
2983 */ 2984 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2985 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 2986 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 2987 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 2988 bf->bf_state.bfs_aggr = 0; 2989 bf->bf_state.bfs_nframes = 1; 2990 } 2991 2992 /* Update CLRDMASK just before this frame is queued */ 2993 ath_tx_update_clrdmask(sc, tid, bf); 2994 2995 /* Direct dispatch to hardware */ 2996 ath_tx_do_ratelookup(sc, bf); 2997 ath_tx_calc_duration(sc, bf); 2998 ath_tx_calc_protection(sc, bf); 2999 ath_tx_set_rtscts(sc, bf); 3000 ath_tx_rate_fill_rcflags(sc, bf); 3001 ath_tx_setds(sc, bf); 3002 3003 /* Statistics */ 3004 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3005 3006 /* Track per-TID hardware queue depth correctly */ 3007 tid->hwq_depth++; 3008 3009 /* Add to BAW */ 3010 if (bf->bf_state.bfs_dobaw) { 3011 ath_tx_addto_baw(sc, an, tid, bf); 3012 bf->bf_state.bfs_addedbaw = 1; 3013 } 3014 3015 /* Set completion handler, multi-frame aggregate or not */ 3016 bf->bf_comp = ath_tx_aggr_comp; 3017 3018 /* 3019 * Update the current leak count if 3020 * we're leaking frames; and set the 3021 * MORE flag as appropriate. 3022 */ 3023 ath_tx_leak_count_update(sc, tid, bf); 3024 3025 /* Hand off to hardware */ 3026 ath_tx_handoff(sc, txq, bf); 3027 } 3028 3029 /* 3030 * Attempt to send the packet. 3031 * If the queue isn't busy, direct-dispatch. 3032 * If the queue is busy enough, queue the given packet on the 3033 * relevant software queue. 3034 */ 3035 void 3036 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3037 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3038 { 3039 struct ath_node *an = ATH_NODE(ni); 3040 struct ieee80211_frame *wh; 3041 struct ath_tid *atid; 3042 int pri, tid; 3043 struct mbuf *m0 = bf->bf_m; 3044 3045 ATH_TX_LOCK_ASSERT(sc); 3046 3047 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3048 wh = mtod(m0, struct ieee80211_frame *); 3049 pri = ath_tx_getac(sc, m0); 3050 tid = ath_tx_gettid(sc, m0); 3051 atid = &an->an_tid[tid]; 3052 3053 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3054 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3055 3056 /* Set local packet state, used to queue packets to hardware */ 3057 /* XXX potentially duplicate info, re-check */ 3058 bf->bf_state.bfs_tid = tid; 3059 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3060 bf->bf_state.bfs_pri = pri; 3061 3062 /* 3063 * If the hardware queue isn't busy, queue the frame directly; 3064 * if it is busy, software queue it. 3065 * If the TID is paused or the traffic is outside the BAW, software 3066 * queue it. 3067 * 3068 * If the node is in power-save and we're leaking a frame, 3069 * leak a single frame. 3070 */ 3071 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3072 /* TID is paused, queue */ 3073 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3074 /* 3075 * If the caller requested that it be sent at a high 3076 * priority, queue it at the head of the list. 3077 */ 3078 if (queue_to_head) 3079 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3080 else 3081 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3082 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3083 /* AMPDU pending; queue */ 3084 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3085 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3086 /* XXX sched? */ 3087 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3088 /* AMPDU running, attempt direct dispatch if possible */ 3089 3090 /* 3091 * Always queue the frame to the tail of the list.
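 * (Queue to the tail first, then pull from the head below: if frames
 * are already software queued, the new frame has to go out after
 * them, so dispatching the head rather than the new buffer preserves
 * ordering.)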
3092 */ 3093 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3094 3095 /* 3096 * If the hardware queue isn't busy, direct dispatch 3097 * the head frame in the list. Don't schedule the 3098 * TID - let it build some more frames first? 3099 * 3100 * When running A-MPDU, always just check the hardware 3101 * queue depth against the aggregate frame limit. 3102 * We don't want to burst a large number of single frames 3103 * out to the hardware; we want to aggressively hold back. 3104 * 3105 * Otherwise, schedule the TID. 3106 */ 3107 /* XXX TXQ locking */ 3108 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) { 3109 bf = ATH_TID_FIRST(atid); 3110 ATH_TID_REMOVE(atid, bf, bf_list); 3111 3112 /* 3113 * Ensure it's definitely treated as a non-AMPDU 3114 * frame - this information may have been left 3115 * over from a previous attempt. 3116 */ 3117 bf->bf_state.bfs_aggr = 0; 3118 bf->bf_state.bfs_nframes = 1; 3119 3120 /* Queue to the hardware */ 3121 ath_tx_xmit_aggr(sc, an, txq, bf); 3122 DPRINTF(sc, ATH_DEBUG_SW_TX, 3123 "%s: xmit_aggr\n", 3124 __func__); 3125 } else { 3126 DPRINTF(sc, ATH_DEBUG_SW_TX, 3127 "%s: ampdu; swq'ing\n", 3128 __func__); 3129 3130 ath_tx_tid_sched(sc, atid); 3131 } 3132 /* 3133 * If we're not doing A-MPDU, be prepared to direct dispatch 3134 * up to both limits if possible. This particular corner 3135 * case may end up with packet starvation between aggregate 3136 * traffic and non-aggregate traffic: we want to ensure 3137 * that non-aggregate stations get a few frames queued to the 3138 * hardware before the aggregate station(s) get their chance. 3139 * 3140 * So if you only ever see a couple of frames direct dispatched 3141 * to the hardware from a non-AMPDU client, check both here 3142 * and in the software queue dispatcher to ensure that those 3143 * non-AMPDU stations get a fair chance to transmit. 3144 */ 3145 /* XXX TXQ locking */ 3146 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && 3147 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { 3148 /* AMPDU not running, attempt direct dispatch */ 3149 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 3150 /* See if clrdmask needs to be set */ 3151 ath_tx_update_clrdmask(sc, atid, bf); 3152 3153 /* 3154 * Update the current leak count if 3155 * we're leaking frames; and set the 3156 * MORE flag as appropriate. 3157 */ 3158 ath_tx_leak_count_update(sc, atid, bf); 3159 3160 /* 3161 * Dispatch the frame. 3162 */ 3163 ath_tx_xmit_normal(sc, txq, bf); 3164 } else { 3165 /* Busy; queue */ 3166 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 3167 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3168 ath_tx_tid_sched(sc, atid); 3169 } 3170 } 3171 3172 /* 3173 * Only set the clrdmask bit if none of the nodes are currently 3174 * filtered. 3175 * 3176 * XXX TODO: go through all the callers and check to see 3177 * which are being called in the context of looping over all 3178 * TIDs (eg, if all tids are being paused, resumed, etc.) 3179 * That'll avoid O(n^2) complexity here. 3180 */ 3181 static void 3182 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3183 { 3184 int i; 3185 3186 ATH_TX_LOCK_ASSERT(sc); 3187 3188 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3189 if (an->an_tid[i].isfiltered == 1) 3190 return; 3191 } 3192 an->clrdmask = 1; 3193 } 3194 3195 /* 3196 * Configure the per-TID node state. 3197 * 3198 * This likely belongs in if_ath_node.c but I can't think of anywhere 3199 * else to put it just yet.
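 * (Note the one special case below: TID 16, IEEE80211_NONQOS_TID,
 * maps to ATH_NONQOS_TID_AC rather than through TID_TO_WME_AC().)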
3200 * 3201 * This sets up the SLISTs and the mutex as appropriate. 3202 */ 3203 void 3204 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3205 { 3206 int i, j; 3207 struct ath_tid *atid; 3208 3209 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3210 atid = &an->an_tid[i]; 3211 3212 /* XXX now with this bzer(), is the field 0'ing needed? */ 3213 bzero(atid, sizeof(*atid)); 3214 3215 TAILQ_INIT(&atid->tid_q); 3216 TAILQ_INIT(&atid->filtq.tid_q); 3217 atid->tid = i; 3218 atid->an = an; 3219 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3220 atid->tx_buf[j] = NULL; 3221 atid->baw_head = atid->baw_tail = 0; 3222 atid->paused = 0; 3223 atid->sched = 0; 3224 atid->hwq_depth = 0; 3225 atid->cleanup_inprogress = 0; 3226 if (i == IEEE80211_NONQOS_TID) 3227 atid->ac = ATH_NONQOS_TID_AC; 3228 else 3229 atid->ac = TID_TO_WME_AC(i); 3230 } 3231 an->clrdmask = 1; /* Always start by setting this bit */ 3232 } 3233 3234 /* 3235 * Pause the current TID. This stops packets from being transmitted 3236 * on it. 3237 * 3238 * Since this is also called from upper layers as well as the driver, 3239 * it will get the TID lock. 3240 */ 3241 static void 3242 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3243 { 3244 3245 ATH_TX_LOCK_ASSERT(sc); 3246 tid->paused++; 3247 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 3248 __func__, tid->paused); 3249 } 3250 3251 /* 3252 * Unpause the current TID, and schedule it if needed. 3253 */ 3254 static void 3255 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3256 { 3257 ATH_TX_LOCK_ASSERT(sc); 3258 3259 /* 3260 * There's some odd places where ath_tx_tid_resume() is called 3261 * when it shouldn't be; this works around that particular issue 3262 * until it's actually resolved. 3263 */ 3264 if (tid->paused == 0) { 3265 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3266 "%s: %6D: paused=0?\n", __func__, 3267 tid->an->an_node.ni_macaddr, ":"); 3268 } else { 3269 tid->paused--; 3270 } 3271 3272 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 3273 __func__, tid->paused); 3274 3275 if (tid->paused) 3276 return; 3277 3278 /* 3279 * Override the clrdmask configuration for the next frame 3280 * from this TID, just to get the ball rolling. 3281 */ 3282 ath_tx_set_clrdmask(sc, tid->an); 3283 3284 if (tid->axq_depth == 0) 3285 return; 3286 3287 /* XXX isfiltered shouldn't ever be 0 at this point */ 3288 if (tid->isfiltered == 1) { 3289 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3290 __func__); 3291 return; 3292 } 3293 3294 ath_tx_tid_sched(sc, tid); 3295 3296 /* 3297 * Queue the software TX scheduler. 3298 */ 3299 ath_tx_swq_kick(sc); 3300 } 3301 3302 /* 3303 * Add the given ath_buf to the TID filtered frame list. 3304 * This requires the TID be filtered. 3305 */ 3306 static void 3307 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3308 struct ath_buf *bf) 3309 { 3310 3311 ATH_TX_LOCK_ASSERT(sc); 3312 3313 if (!tid->isfiltered) 3314 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3315 __func__); 3316 3317 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3318 3319 /* Set the retry bit and bump the retry counter */ 3320 ath_tx_set_retry(sc, bf); 3321 sc->sc_stats.ast_tx_swfiltered++; 3322 3323 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3324 } 3325 3326 /* 3327 * Handle a completed filtered frame from the given TID. 3328 * This just enables/pauses the filtered frame state if required 3329 * and appends the filtered frame to the filtered queue. 
3330 */ 3331 static void 3332 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3333 struct ath_buf *bf) 3334 { 3335 3336 ATH_TX_LOCK_ASSERT(sc); 3337 3338 if (! tid->isfiltered) { 3339 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3340 __func__); 3341 tid->isfiltered = 1; 3342 ath_tx_tid_pause(sc, tid); 3343 } 3344 3345 /* Add the frame to the filter queue */ 3346 ath_tx_tid_filt_addbuf(sc, tid, bf); 3347 } 3348 3349 /* 3350 * Complete the filtered frame TX completion. 3351 * 3352 * If there are no more frames in the hardware queue, unpause/unfilter 3353 * the TID if applicable. Otherwise we will wait for a node PS transition 3354 * to unfilter. 3355 */ 3356 static void 3357 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3358 { 3359 struct ath_buf *bf; 3360 3361 ATH_TX_LOCK_ASSERT(sc); 3362 3363 if (tid->hwq_depth != 0) 3364 return; 3365 3366 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3367 __func__); 3368 tid->isfiltered = 0; 3369 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3370 ath_tx_set_clrdmask(sc, tid->an); 3371 3372 /* XXX this is really quite inefficient */ 3373 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3374 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3375 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3376 } 3377 3378 ath_tx_tid_resume(sc, tid); 3379 } 3380 3381 /* 3382 * Called when a single (aggregate or otherwise) frame is completed. 3383 * 3384 * Returns 1 if the buffer could be added to the filtered list 3385 * (cloned or otherwise), 0 if the buffer couldn't be added to the 3386 * filtered list (failed clone; expired retry) and the caller should 3387 * free it and handle it like a failure (eg by sending a BAR.) 3388 */ 3389 static int 3390 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3391 struct ath_buf *bf) 3392 { 3393 struct ath_buf *nbf; 3394 int retval; 3395 3396 ATH_TX_LOCK_ASSERT(sc); 3397 3398 /* 3399 * Don't allow a filtered frame to live forever. 3400 */ 3401 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3402 sc->sc_stats.ast_tx_swretrymax++; 3403 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3404 "%s: bf=%p, seqno=%d, exceeded retries\n", 3405 __func__, 3406 bf, 3407 bf->bf_state.bfs_seqno); 3408 return (0); 3409 } 3410 3411 /* 3412 * A busy buffer can't be added to the retry list. 3413 * It needs to be cloned. 3414 */ 3415 if (bf->bf_flags & ATH_BUF_BUSY) { 3416 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3417 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3418 "%s: busy buffer clone: %p -> %p\n", 3419 __func__, bf, nbf); 3420 } else { 3421 nbf = bf; 3422 } 3423 3424 if (nbf == NULL) { 3425 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3426 "%s: busy buffer couldn't be cloned (%p)!\n", 3427 __func__, bf); 3428 retval = 1; 3429 } else { 3430 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3431 retval = 0; 3432 } 3433 ath_tx_tid_filt_comp_complete(sc, tid); 3434 3435 return (retval); 3436 } 3437 3438 static void 3439 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3440 struct ath_buf *bf_first, ath_bufhead *bf_q) 3441 { 3442 struct ath_buf *bf, *bf_next, *nbf; 3443 3444 ATH_TX_LOCK_ASSERT(sc); 3445 3446 bf = bf_first; 3447 while (bf) { 3448 bf_next = bf->bf_next; 3449 bf->bf_next = NULL; /* Remove it from the aggr list */ 3450 3451 /* 3452 * Don't allow a filtered frame to live forever. 
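 * (Same rule as the single-frame path above: past SWMAX_RETRIES the
 * frame is placed on bf_q for the caller to complete as a failure.)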
3453 */ 3454 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3455 sc->sc_stats.ast_tx_swretrymax++; 3456 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3457 "%s: bf=%p, seqno=%d, exceeded retries\n", 3458 __func__, 3459 bf, 3460 bf->bf_state.bfs_seqno); 3461 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3462 goto next; 3463 } 3464 3465 if (bf->bf_flags & ATH_BUF_BUSY) { 3466 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3467 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3468 "%s: busy buffer cloned: %p -> %p\n", 3469 __func__, bf, nbf); 3470 } else { 3471 nbf = bf; 3472 } 3473 3474 /* 3475 * If the buffer couldn't be cloned, add it to bf_q; 3476 * the caller will free the buffer(s) as required. 3477 */ 3478 if (nbf == NULL) { 3479 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3480 "%s: buffer couldn't be cloned! (%p)\n", 3481 __func__, bf); 3482 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3483 } else { 3484 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3485 } 3486 next: 3487 bf = bf_next; 3488 } 3489 3490 ath_tx_tid_filt_comp_complete(sc, tid); 3491 } 3492 3493 /* 3494 * Suspend the queue because we need to TX a BAR. 3495 */ 3496 static void 3497 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3498 { 3499 3500 ATH_TX_LOCK_ASSERT(sc); 3501 3502 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3503 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3504 __func__, 3505 tid->tid, 3506 tid->bar_wait, 3507 tid->bar_tx); 3508 3509 /* We shouldn't be called when bar_tx is 1 */ 3510 if (tid->bar_tx) { 3511 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3512 "%s: bar_tx is 1?!\n", __func__); 3513 } 3514 3515 /* If we've already been called, just be patient. */ 3516 if (tid->bar_wait) 3517 return; 3518 3519 /* Wait! */ 3520 tid->bar_wait = 1; 3521 3522 /* Only one pause, no matter how many frames fail */ 3523 ath_tx_tid_pause(sc, tid); 3524 } 3525 3526 /* 3527 * We've finished with BAR handling - either we succeeded or 3528 * failed. Either way, unsuspend TX. 3529 */ 3530 static void 3531 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3532 { 3533 3534 ATH_TX_LOCK_ASSERT(sc); 3535 3536 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3537 "%s: %6D: TID=%d, called\n", 3538 __func__, 3539 tid->an->an_node.ni_macaddr, 3540 ":", 3541 tid->tid); 3542 3543 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3544 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3545 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3546 __func__, tid->an->an_node.ni_macaddr, ":", 3547 tid->tid, tid->bar_tx, tid->bar_wait); 3548 } 3549 3550 tid->bar_tx = tid->bar_wait = 0; 3551 ath_tx_tid_resume(sc, tid); 3552 } 3553 3554 /* 3555 * Return whether we're ready to TX a BAR frame. 3556 * 3557 * Requires the TID lock be held. 3558 */ 3559 static int 3560 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3561 { 3562 3563 ATH_TX_LOCK_ASSERT(sc); 3564 3565 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3566 return (0); 3567 3568 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3569 "%s: %6D: TID=%d, bar ready\n", 3570 __func__, 3571 tid->an->an_node.ni_macaddr, 3572 ":", 3573 tid->tid); 3574 3575 return (1); 3576 } 3577 3578 /* 3579 * Check whether the current TID is ready to have a BAR 3580 * TXed and if so, do the TX. 3581 * 3582 * Since the TID/TXQ lock can't be held during a call to 3583 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3584 * sending the BAR and locking it again. 3585 * 3586 * Eventually, the code to send the BAR should be broken out 3587 * from this routine so the lock doesn't have to be reacquired 3588 * just to be immediately dropped by the caller.
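 *
 * Sketch of the dance below: bar_tx is set, the lock is dropped
 * around ieee80211_send_bar(), and on failure the TID is unsuspended
 * straight away so traffic isn't wedged behind a BAR that never
 * went out.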
3589 */ 3590 static void 3591 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3592 { 3593 struct ieee80211_tx_ampdu *tap; 3594 3595 ATH_TX_LOCK_ASSERT(sc); 3596 3597 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3598 "%s: %6D: TID=%d, called\n", 3599 __func__, 3600 tid->an->an_node.ni_macaddr, 3601 ":", 3602 tid->tid); 3603 3604 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3605 3606 /* 3607 * This is an error condition! 3608 */ 3609 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3610 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3611 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3612 __func__, tid->an->an_node.ni_macaddr, ":", 3613 tid->tid, tid->bar_tx, tid->bar_wait); 3614 return; 3615 } 3616 3617 /* Don't do anything if we still have pending frames */ 3618 if (tid->hwq_depth > 0) { 3619 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3620 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3621 __func__, 3622 tid->an->an_node.ni_macaddr, 3623 ":", 3624 tid->tid, 3625 tid->hwq_depth); 3626 return; 3627 } 3628 3629 /* We're now about to TX */ 3630 tid->bar_tx = 1; 3631 3632 /* 3633 * Override the clrdmask configuration for the next frame, 3634 * just to get the ball rolling. 3635 */ 3636 ath_tx_set_clrdmask(sc, tid->an); 3637 3638 /* 3639 * Calculate new BAW left edge, now that all frames have either 3640 * succeeded or failed. 3641 * 3642 * XXX verify this is _actually_ the valid value to begin at! 3643 */ 3644 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3645 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3646 __func__, 3647 tid->an->an_node.ni_macaddr, 3648 ":", 3649 tid->tid, 3650 tap->txa_start); 3651 3652 /* Try sending the BAR frame */ 3653 /* We can't hold the lock here! */ 3654 3655 ATH_TX_UNLOCK(sc); 3656 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3657 /* Success? Now we wait for notification that it's done */ 3658 ATH_TX_LOCK(sc); 3659 return; 3660 } 3661 3662 /* Failure? For now, warn loudly and continue */ 3663 ATH_TX_LOCK(sc); 3664 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3665 "%s: %6D: TID=%d, failed to TX BAR, continue!\n", 3666 __func__, tid->an->an_node.ni_macaddr, ":", 3667 tid->tid); 3668 ath_tx_tid_bar_unsuspend(sc, tid); 3669 } 3670 3671 static void 3672 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3673 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3674 { 3675 3676 ATH_TX_LOCK_ASSERT(sc); 3677 3678 /* 3679 * If the current TID is running AMPDU, update 3680 * the BAW. 3681 */ 3682 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3683 bf->bf_state.bfs_dobaw) { 3684 /* 3685 * Only remove the frame from the BAW if it's 3686 * been transmitted at least once; this means 3687 * the frame was in the BAW to begin with. 3688 */ 3689 if (bf->bf_state.bfs_retries > 0) { 3690 ath_tx_update_baw(sc, an, tid, bf); 3691 bf->bf_state.bfs_dobaw = 0; 3692 } 3693 #if 0 3694 /* 3695 * This has become a non-fatal error now 3696 */ 3697 if (! 
bf->bf_state.bfs_addedbaw) 3698 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3699 "%s: wasn't added: seqno %d\n", 3700 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3701 #endif 3702 } 3703 3704 /* Strip it out of an aggregate list if it was in one */ 3705 bf->bf_next = NULL; 3706 3707 /* Insert on the free queue to be freed by the caller */ 3708 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3709 } 3710 3711 static void 3712 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3713 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3714 { 3715 struct ieee80211_node *ni = &an->an_node; 3716 struct ath_txq *txq; 3717 struct ieee80211_tx_ampdu *tap; 3718 3719 txq = sc->sc_ac2q[tid->ac]; 3720 tap = ath_tx_get_tx_tid(an, tid->tid); 3721 3722 DPRINTF(sc, ATH_DEBUG_SW_TX, 3723 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " 3724 "seqno=%d, retry=%d\n", 3725 __func__, 3726 pfx, 3727 ni->ni_macaddr, 3728 ":", 3729 bf, 3730 bf->bf_state.bfs_addedbaw, 3731 bf->bf_state.bfs_dobaw, 3732 SEQNO(bf->bf_state.bfs_seqno), 3733 bf->bf_state.bfs_retries); 3734 DPRINTF(sc, ATH_DEBUG_SW_TX, 3735 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3736 __func__, 3737 pfx, 3738 ni->ni_macaddr, 3739 ":", 3740 bf, 3741 txq->axq_qnum, 3742 txq->axq_depth, 3743 txq->axq_aggr_depth); 3744 DPRINTF(sc, ATH_DEBUG_SW_TX, 3745 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " 3746 "isfiltered=%d\n", 3747 __func__, 3748 pfx, 3749 ni->ni_macaddr, 3750 ":", 3751 bf, 3752 tid->axq_depth, 3753 tid->hwq_depth, 3754 tid->bar_wait, 3755 tid->isfiltered); 3756 DPRINTF(sc, ATH_DEBUG_SW_TX, 3757 "%s: %s: %6D: tid %d: " 3758 "sched=%d, paused=%d, " 3759 "incomp=%d, baw_head=%d, " 3760 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3761 __func__, 3762 pfx, 3763 ni->ni_macaddr, 3764 ":", 3765 tid->tid, 3766 tid->sched, tid->paused, 3767 tid->incomp, tid->baw_head, 3768 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3769 ni->ni_txseqs[tid->tid]); 3770 3771 /* XXX Dump the frame, see what it is? */ 3772 ieee80211_dump_pkt(ni->ni_ic, 3773 mtod(bf->bf_m, const uint8_t *), 3774 bf->bf_m->m_len, 0, -1); 3775 } 3776 3777 /* 3778 * Free any packets currently pending in the software TX queue. 3779 * 3780 * This will be called when a node is being deleted. 3781 * 3782 * It can also be called on an active node during an interface 3783 * reset or state transition. 3784 * 3785 * (From Linux/reference): 3786 * 3787 * TODO: For frame(s) that are in the retry state, we will reuse the 3788 * sequence number(s) without setting the retry bit. The 3789 * alternative is to give up on these and BAR the receiver's window 3790 * forward.
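 *
 * Sketch of the flow below: both the normal and filtered queues are
 * walked, each buffer is taken out of the BAW via
 * ath_tx_tid_drain_pkt() and collected on bf_cq, which the caller
 * completes once the TX lock has been dropped.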
3791 */
3792 static void
3793 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3794 struct ath_tid *tid, ath_bufhead *bf_cq)
3795 {
3796 struct ath_buf *bf;
3797 struct ieee80211_tx_ampdu *tap;
3798 struct ieee80211_node *ni = &an->an_node;
3799 int t;
3800
3801 tap = ath_tx_get_tx_tid(an, tid->tid);
3802
3803 ATH_TX_LOCK_ASSERT(sc);
3804
3805 /* Walk the queue, free frames */
3806 t = 0;
3807 for (;;) {
3808 bf = ATH_TID_FIRST(tid);
3809 if (bf == NULL) {
3810 break;
3811 }
3812
3813 if (t == 0) {
3814 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3815 t = 1;
3816 }
3817
3818 ATH_TID_REMOVE(tid, bf, bf_list);
3819 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3820 }
3821
3822 /* And now, drain the filtered frame queue */
3823 t = 0;
3824 for (;;) {
3825 bf = ATH_TID_FILT_FIRST(tid);
3826 if (bf == NULL)
3827 break;
3828
3829 if (t == 0) {
3830 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3831 t = 1;
3832 }
3833
3834 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3835 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3836 }
3837
3838 /*
3839 * Override the clrdmask configuration for the next frame
3840 * in case there is some future transmission, just to get
3841 * the ball rolling.
3842 *
3843 * This won't hurt things if the TID is about to be freed.
3844 */
3845 ath_tx_set_clrdmask(sc, tid->an);
3846
3847 /*
3848 * Now that it's completed, grab the TID lock and update
3849 * the sequence number and BAW window.
3850 * Because sequence numbers have been assigned to frames
3851 * that haven't been sent yet, it's entirely possible
3852 * we'll be called with some pending frames that have not
3853 * been transmitted.
3854 *
3855 * The cleaner solution is to do the sequence number allocation
3856 * when the packet is first transmitted - and thus the "retries"
3857 * check above would be enough to update the BAW/seqno.
3858 */
3859
3860 /* But don't do it for non-QoS TIDs */
3861 if (tap) {
3862 #if 1
3863 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3864 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3865 __func__,
3866 ni->ni_macaddr,
3867 ":",
3868 an,
3869 tid->tid,
3870 tap->txa_start);
3871 #endif
3872 ni->ni_txseqs[tid->tid] = tap->txa_start;
3873 tid->baw_tail = tid->baw_head;
3874 }
3875 }
3876
3877 /*
3878 * Reset the TID state. This must only be called once the node has
3879 * had its frames flushed from this TID, to ensure that no other
3880 * pause / unpause logic can kick in.
3881 */
3882 static void
3883 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3884 {
3885
3886 #if 0
3887 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3888 tid->paused = tid->sched = tid->addba_tx_pending = 0;
3889 tid->incomp = tid->cleanup_inprogress = 0;
3890 #endif
3891
3892 /*
3893 * If we have a bar_wait set, we need to unpause the TID
3894 * here. Otherwise once cleanup has finished, the TID won't
3895 * have the right paused counter.
3896 *
3897 * XXX I'm not going through resume here - I don't want the
3898 * node to be rescheduled just yet. This however should be
3899 * methodized!
3900 */
3901 if (tid->bar_wait) {
3902 if (tid->paused > 0) {
3903 tid->paused --;
3904 }
3905 }
3906
3907 /*
3908 * XXX same with a currently filtered TID.
3909 *
3910 * Since this is being called during a flush, we assume that
3911 * the filtered frame list is actually empty.
3912 *
3913 * XXX TODO: add in a check to ensure that the filtered queue
3914 * depth is actually 0!
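 *
 * Such a check could be as simple as (a sketch; not in the tree):
 *
 *	if (ATH_TID_FILT_FIRST(tid) != NULL)
 *		DPRINTF(sc, ATH_DEBUG_SW_TX,
 *		    "%s: filtered queue not empty!\n", __func__);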
3915 */
3916 if (tid->isfiltered) {
3917 if (tid->paused > 0) {
3918 tid->paused --;
3919 }
3920 }
3921
3922 /*
3923 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3924 * The TID may be going through cleanup from the last association
3925 * where things in the BAW are still in the hardware queue.
3926 */
3927 tid->bar_wait = 0;
3928 tid->bar_tx = 0;
3929 tid->isfiltered = 0;
3930 tid->sched = 0;
3931 tid->addba_tx_pending = 0;
3932
3933 /*
3934 * XXX TODO: it may just be enough to walk the HWQs and mark
3935 * frames for that node as non-aggregate; or mark the ath_node
3936 * with something that indicates that aggregation is no longer
3937 * occurring. Then we can just toss the BAW complaints and
3938 * do a complete hard reset of state here - no pause, no
3939 * complete counter, etc.
3940 */
3941
3942 }
3943
3944 /*
3945 * Flush all software queued packets for the given node.
3946 *
3947 * This occurs when a completion handler frees the last buffer
3948 * for a node, and the node is thus freed. This causes the node
3949 * to be cleaned up, which ends up calling ath_tx_node_flush.
3950 */
3951 void
3952 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3953 {
3954 int tid;
3955 ath_bufhead bf_cq;
3956 struct ath_buf *bf;
3957
3958 TAILQ_INIT(&bf_cq);
3959
3960 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3961 &an->an_node);
3962
3963 ATH_TX_LOCK(sc);
3964 DPRINTF(sc, ATH_DEBUG_NODE,
3965 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3966 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3967 __func__,
3968 an->an_node.ni_macaddr,
3969 ":",
3970 an->an_is_powersave,
3971 an->an_stack_psq,
3972 an->an_tim_set,
3973 an->an_swq_depth,
3974 an->clrdmask,
3975 an->an_leak_count);
3976
3977 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3978 struct ath_tid *atid = &an->an_tid[tid];
3979
3980 /* Free packets */
3981 ath_tx_tid_drain(sc, an, atid, &bf_cq);
3982
3983 /* Remove this tid from the list of active tids */
3984 ath_tx_tid_unsched(sc, atid);
3985
3986 /* Reset the per-TID pause, BAR, etc state */
3987 ath_tx_tid_reset(sc, atid);
3988 }
3989
3990 /*
3991 * Clear global leak count
3992 */
3993 an->an_leak_count = 0;
3994 ATH_TX_UNLOCK(sc);
3995
3996 /* Handle completed frames */
3997 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3998 TAILQ_REMOVE(&bf_cq, bf, bf_list);
3999 ath_tx_default_comp(sc, bf, 0);
4000 }
4001 }
4002
4003 /*
4004 * Drain all the software TXQs currently with traffic queued.
4005 */
4006 void
4007 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4008 {
4009 struct ath_tid *tid;
4010 ath_bufhead bf_cq;
4011 struct ath_buf *bf;
4012
4013 TAILQ_INIT(&bf_cq);
4014 ATH_TX_LOCK(sc);
4015
4016 /*
4017 * Iterate over all active tids for the given txq,
4018 * flushing and unsched'ing them
4019 */
4020 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4021 tid = TAILQ_FIRST(&txq->axq_tidq);
4022 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4023 ath_tx_tid_unsched(sc, tid);
4024 }
4025
4026 ATH_TX_UNLOCK(sc);
4027
4028 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4029 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4030 ath_tx_default_comp(sc, bf, 0);
4031 }
4032 }
4033
4034 /*
4035 * Handle completion of non-aggregate session frames.
4036 *
4037 * This (currently) doesn't implement software retransmission of
4038 * non-aggregate frames!
4039 *
4040 * Software retransmission of non-aggregate frames needs to obey
4041 * the strict sequence number ordering, and drop any frames that
4042 * will fail this.
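 *
 * For example, if seqno 100 fails but seqno 101 has already been
 * handed to the hardware, re-queueing 100 behind 101 would break
 * that ordering; the retry would have to be dropped instead.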
4043 * 4044 * For now, filtered frames and frame transmission will cause 4045 * all kinds of issues. So we don't support them. 4046 * 4047 * So anyone queuing frames via ath_tx_normal_xmit() or 4048 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4049 */ 4050 void 4051 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4052 { 4053 struct ieee80211_node *ni = bf->bf_node; 4054 struct ath_node *an = ATH_NODE(ni); 4055 int tid = bf->bf_state.bfs_tid; 4056 struct ath_tid *atid = &an->an_tid[tid]; 4057 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4058 4059 /* The TID state is protected behind the TXQ lock */ 4060 ATH_TX_LOCK(sc); 4061 4062 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4063 __func__, bf, fail, atid->hwq_depth - 1); 4064 4065 atid->hwq_depth--; 4066 4067 #if 0 4068 /* 4069 * If the frame was filtered, stick it on the filter frame 4070 * queue and complain about it. It shouldn't happen! 4071 */ 4072 if ((ts->ts_status & HAL_TXERR_FILT) || 4073 (ts->ts_status != 0 && atid->isfiltered)) { 4074 DPRINTF(sc, ATH_DEBUG_SW_TX, 4075 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4076 __func__, 4077 atid->isfiltered, 4078 ts->ts_status); 4079 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4080 } 4081 #endif 4082 if (atid->isfiltered) 4083 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4084 if (atid->hwq_depth < 0) 4085 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4086 __func__, atid->hwq_depth); 4087 4088 /* 4089 * If the queue is filtered, potentially mark it as complete 4090 * and reschedule it as needed. 4091 * 4092 * This is required as there may be a subsequent TX descriptor 4093 * for this end-node that has CLRDMASK set, so it's quite possible 4094 * that a filtered frame will be followed by a non-filtered 4095 * (complete or otherwise) frame. 4096 * 4097 * XXX should we do this before we complete the frame? 4098 */ 4099 if (atid->isfiltered) 4100 ath_tx_tid_filt_comp_complete(sc, atid); 4101 ATH_TX_UNLOCK(sc); 4102 4103 /* 4104 * punt to rate control if we're not being cleaned up 4105 * during a hw queue drain and the frame wanted an ACK. 4106 */ 4107 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4108 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4109 ts, bf->bf_state.bfs_pktlen, 4110 1, (ts->ts_status == 0) ? 0 : 1); 4111 4112 ath_tx_default_comp(sc, bf, fail); 4113 } 4114 4115 /* 4116 * Handle cleanup of aggregate session packets that aren't 4117 * an A-MPDU. 4118 * 4119 * There's no need to update the BAW here - the session is being 4120 * torn down. 4121 */ 4122 static void 4123 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4124 { 4125 struct ieee80211_node *ni = bf->bf_node; 4126 struct ath_node *an = ATH_NODE(ni); 4127 int tid = bf->bf_state.bfs_tid; 4128 struct ath_tid *atid = &an->an_tid[tid]; 4129 4130 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4131 __func__, tid, atid->incomp); 4132 4133 ATH_TX_LOCK(sc); 4134 atid->incomp--; 4135 if (atid->incomp == 0) { 4136 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4137 "%s: TID %d: cleaned up! resume!\n", 4138 __func__, tid); 4139 atid->cleanup_inprogress = 0; 4140 ath_tx_tid_resume(sc, atid); 4141 } 4142 ATH_TX_UNLOCK(sc); 4143 4144 ath_tx_default_comp(sc, bf, 0); 4145 } 4146 4147 /* 4148 * Performs transmit side cleanup when TID changes from aggregated to 4149 * unaggregated. 4150 * 4151 * - Discard all retry frames from the s/w queue. 
4152 * - Fix the tx completion function for all buffers in s/w queue. 4153 * - Count the number of unacked frames, and let transmit completion 4154 * handle it later. 4155 * 4156 * The caller is responsible for pausing the TID and unpausing the 4157 * TID if no cleanup was required. Otherwise the cleanup path will 4158 * unpause the TID once the last hardware queued frame is completed. 4159 */ 4160 static void 4161 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4162 ath_bufhead *bf_cq) 4163 { 4164 struct ath_tid *atid = &an->an_tid[tid]; 4165 struct ieee80211_tx_ampdu *tap; 4166 struct ath_buf *bf, *bf_next; 4167 4168 ATH_TX_LOCK_ASSERT(sc); 4169 4170 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4171 "%s: TID %d: called\n", __func__, tid); 4172 4173 /* 4174 * Move the filtered frames to the TX queue, before 4175 * we run off and discard/process things. 4176 */ 4177 /* XXX this is really quite inefficient */ 4178 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4179 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4180 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4181 } 4182 4183 /* 4184 * Update the frames in the software TX queue: 4185 * 4186 * + Discard retry frames in the queue 4187 * + Fix the completion function to be non-aggregate 4188 */ 4189 bf = ATH_TID_FIRST(atid); 4190 while (bf) { 4191 if (bf->bf_state.bfs_isretried) { 4192 bf_next = TAILQ_NEXT(bf, bf_list); 4193 ATH_TID_REMOVE(atid, bf, bf_list); 4194 if (bf->bf_state.bfs_dobaw) { 4195 ath_tx_update_baw(sc, an, atid, bf); 4196 if (!bf->bf_state.bfs_addedbaw) 4197 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4198 "%s: wasn't added: seqno %d\n", 4199 __func__, 4200 SEQNO(bf->bf_state.bfs_seqno)); 4201 } 4202 bf->bf_state.bfs_dobaw = 0; 4203 /* 4204 * Call the default completion handler with "fail" just 4205 * so upper levels are suitably notified about this. 4206 */ 4207 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4208 bf = bf_next; 4209 continue; 4210 } 4211 /* Give these the default completion handler */ 4212 bf->bf_comp = ath_tx_normal_comp; 4213 bf = TAILQ_NEXT(bf, bf_list); 4214 } 4215 4216 /* 4217 * Calculate what hardware-queued frames exist based 4218 * on the current BAW size. Ie, what frames have been 4219 * added to the TX hardware queue for this TID but 4220 * not yet ACKed. 4221 */ 4222 tap = ath_tx_get_tx_tid(an, tid); 4223 /* Need the lock - fiddling with BAW */ 4224 while (atid->baw_head != atid->baw_tail) { 4225 if (atid->tx_buf[atid->baw_head]) { 4226 atid->incomp++; 4227 atid->cleanup_inprogress = 1; 4228 atid->tx_buf[atid->baw_head] = NULL; 4229 } 4230 INCR(atid->baw_head, ATH_TID_MAX_BUFS); 4231 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 4232 } 4233 4234 if (atid->cleanup_inprogress) 4235 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4236 "%s: TID %d: cleanup needed: %d packets\n", 4237 __func__, tid, atid->incomp); 4238 4239 /* Owner now must free completed frames */ 4240 } 4241 4242 static struct ath_buf * 4243 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4244 struct ath_tid *tid, struct ath_buf *bf) 4245 { 4246 struct ath_buf *nbf; 4247 int error; 4248 4249 /* 4250 * Clone the buffer. This will handle the dma unmap and 4251 * copy the node reference to the new buffer. If this 4252 * works out, 'bf' will have no DMA mapping, no mbuf 4253 * pointer and no node reference. 
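 *
 * The caller pattern used by the retry paths below is roughly:
 *
 *	nbf = ath_tx_retry_clone(sc, an, atid, bf);
 *	if (nbf != NULL)
 *		bf = nbf;	(the original bf has been freed)
 *	else
 *		bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;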
4254 */ 4255 nbf = ath_buf_clone(sc, bf); 4256 4257 #if 0 4258 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4259 __func__); 4260 #endif 4261 4262 if (nbf == NULL) { 4263 /* Failed to clone */ 4264 DPRINTF(sc, ATH_DEBUG_XMIT, 4265 "%s: failed to clone a busy buffer\n", 4266 __func__); 4267 return NULL; 4268 } 4269 4270 /* Setup the dma for the new buffer */ 4271 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4272 if (error != 0) { 4273 DPRINTF(sc, ATH_DEBUG_XMIT, 4274 "%s: failed to setup dma for clone\n", 4275 __func__); 4276 /* 4277 * Put this at the head of the list, not tail; 4278 * that way it doesn't interfere with the 4279 * busy buffer logic (which uses the tail of 4280 * the list.) 4281 */ 4282 ATH_TXBUF_LOCK(sc); 4283 ath_returnbuf_head(sc, nbf); 4284 ATH_TXBUF_UNLOCK(sc); 4285 return NULL; 4286 } 4287 4288 /* Update BAW if required, before we free the original buf */ 4289 if (bf->bf_state.bfs_dobaw) 4290 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4291 4292 /* Free original buffer; return new buffer */ 4293 ath_freebuf(sc, bf); 4294 4295 return nbf; 4296 } 4297 4298 /* 4299 * Handle retrying an unaggregate frame in an aggregate 4300 * session. 4301 * 4302 * If too many retries occur, pause the TID, wait for 4303 * any further retransmits (as there's no reason why 4304 * non-aggregate frames in an aggregate session are 4305 * transmitted in-order; they just have to be in-BAW) 4306 * and then queue a BAR. 4307 */ 4308 static void 4309 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4310 { 4311 struct ieee80211_node *ni = bf->bf_node; 4312 struct ath_node *an = ATH_NODE(ni); 4313 int tid = bf->bf_state.bfs_tid; 4314 struct ath_tid *atid = &an->an_tid[tid]; 4315 struct ieee80211_tx_ampdu *tap; 4316 4317 ATH_TX_LOCK(sc); 4318 4319 tap = ath_tx_get_tx_tid(an, tid); 4320 4321 /* 4322 * If the buffer is marked as busy, we can't directly 4323 * reuse it. Instead, try to clone the buffer. 4324 * If the clone is successful, recycle the old buffer. 4325 * If the clone is unsuccessful, set bfs_retries to max 4326 * to force the next bit of code to free the buffer 4327 * for us. 4328 */ 4329 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4330 (bf->bf_flags & ATH_BUF_BUSY)) { 4331 struct ath_buf *nbf; 4332 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4333 if (nbf) 4334 /* bf has been freed at this point */ 4335 bf = nbf; 4336 else 4337 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4338 } 4339 4340 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4341 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4342 "%s: exceeded retries; seqno %d\n", 4343 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4344 sc->sc_stats.ast_tx_swretrymax++; 4345 4346 /* Update BAW anyway */ 4347 if (bf->bf_state.bfs_dobaw) { 4348 ath_tx_update_baw(sc, an, atid, bf); 4349 if (! bf->bf_state.bfs_addedbaw) 4350 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4351 "%s: wasn't added: seqno %d\n", 4352 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4353 } 4354 bf->bf_state.bfs_dobaw = 0; 4355 4356 /* Suspend the TX queue and get ready to send the BAR */ 4357 ath_tx_tid_bar_suspend(sc, atid); 4358 4359 /* Send the BAR if there are no other frames waiting */ 4360 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4361 ath_tx_tid_bar_tx(sc, atid); 4362 4363 ATH_TX_UNLOCK(sc); 4364 4365 /* Free buffer, bf is free after this call */ 4366 ath_tx_default_comp(sc, bf, 0); 4367 return; 4368 } 4369 4370 /* 4371 * This increments the retry counter as well as 4372 * sets the retry flag in the ath_buf and packet 4373 * body. 
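 * (The "packet body" part is the retry bit in the 802.11 frame
 * control field, so the receiver can identify the duplicate.)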
4374 */ 4375 ath_tx_set_retry(sc, bf); 4376 sc->sc_stats.ast_tx_swretries++; 4377 4378 /* 4379 * Insert this at the head of the queue, so it's 4380 * retried before any current/subsequent frames. 4381 */ 4382 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4383 ath_tx_tid_sched(sc, atid); 4384 /* Send the BAR if there are no other frames waiting */ 4385 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4386 ath_tx_tid_bar_tx(sc, atid); 4387 4388 ATH_TX_UNLOCK(sc); 4389 } 4390 4391 /* 4392 * Common code for aggregate excessive retry/subframe retry. 4393 * If retrying, queues buffers to bf_q. If not, frees the 4394 * buffers. 4395 * 4396 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4397 */ 4398 static int 4399 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4400 ath_bufhead *bf_q) 4401 { 4402 struct ieee80211_node *ni = bf->bf_node; 4403 struct ath_node *an = ATH_NODE(ni); 4404 int tid = bf->bf_state.bfs_tid; 4405 struct ath_tid *atid = &an->an_tid[tid]; 4406 4407 ATH_TX_LOCK_ASSERT(sc); 4408 4409 /* XXX clr11naggr should be done for all subframes */ 4410 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4411 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4412 4413 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4414 4415 /* 4416 * If the buffer is marked as busy, we can't directly 4417 * reuse it. Instead, try to clone the buffer. 4418 * If the clone is successful, recycle the old buffer. 4419 * If the clone is unsuccessful, set bfs_retries to max 4420 * to force the next bit of code to free the buffer 4421 * for us. 4422 */ 4423 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4424 (bf->bf_flags & ATH_BUF_BUSY)) { 4425 struct ath_buf *nbf; 4426 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4427 if (nbf) 4428 /* bf has been freed at this point */ 4429 bf = nbf; 4430 else 4431 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4432 } 4433 4434 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4435 sc->sc_stats.ast_tx_swretrymax++; 4436 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4437 "%s: max retries: seqno %d\n", 4438 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4439 ath_tx_update_baw(sc, an, atid, bf); 4440 if (!bf->bf_state.bfs_addedbaw) 4441 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4442 "%s: wasn't added: seqno %d\n", 4443 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4444 bf->bf_state.bfs_dobaw = 0; 4445 return 1; 4446 } 4447 4448 ath_tx_set_retry(sc, bf); 4449 sc->sc_stats.ast_tx_swretries++; 4450 bf->bf_next = NULL; /* Just to make sure */ 4451 4452 /* Clear the aggregate state */ 4453 bf->bf_state.bfs_aggr = 0; 4454 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4455 bf->bf_state.bfs_nframes = 1; 4456 4457 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4458 return 0; 4459 } 4460 4461 /* 4462 * error pkt completion for an aggregate destination 4463 */ 4464 static void 4465 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4466 struct ath_tid *tid) 4467 { 4468 struct ieee80211_node *ni = bf_first->bf_node; 4469 struct ath_node *an = ATH_NODE(ni); 4470 struct ath_buf *bf_next, *bf; 4471 ath_bufhead bf_q; 4472 int drops = 0; 4473 struct ieee80211_tx_ampdu *tap; 4474 ath_bufhead bf_cq; 4475 4476 TAILQ_INIT(&bf_q); 4477 TAILQ_INIT(&bf_cq); 4478 4479 /* 4480 * Update rate control - all frames have failed. 4481 * 4482 * XXX use the length in the first frame in the series; 4483 * XXX just so things are consistent for now. 
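 *
 * Note that bfs_nframes is passed as both the frame count and the
 * failure count in the rate control update below - every subframe
 * in a failed aggregate is counted as bad.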
4484 */
4485 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4486 &bf_first->bf_status.ds_txstat,
4487 bf_first->bf_state.bfs_pktlen,
4488 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4489
4490 ATH_TX_LOCK(sc);
4491 tap = ath_tx_get_tx_tid(an, tid->tid);
4492 sc->sc_stats.ast_tx_aggr_failall++;
4493
4494 /* Retry all subframes */
4495 bf = bf_first;
4496 while (bf) {
4497 bf_next = bf->bf_next;
4498 bf->bf_next = NULL; /* Remove it from the aggr list */
4499 sc->sc_stats.ast_tx_aggr_fail++;
4500 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4501 drops++;
4502 bf->bf_next = NULL;
4503 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4504 }
4505 bf = bf_next;
4506 }
4507
4508 /* Prepend all frames to the beginning of the queue */
4509 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4510 TAILQ_REMOVE(&bf_q, bf, bf_list);
4511 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4512 }
4513
4514 /*
4515 * Schedule the TID to be re-tried.
4516 */
4517 ath_tx_tid_sched(sc, tid);
4518
4519 /*
4520 * Send BAR if we dropped any frames
4521 *
4522 * Keep the txq lock held for now, as we need to ensure
4523 * that ni_txseqs[] is consistent (as it's being updated
4524 * in the ifnet TX context or raw TX context.)
4525 */
4526 if (drops) {
4527 /* Suspend the TX queue and get ready to send the BAR */
4528 ath_tx_tid_bar_suspend(sc, tid);
4529 }
4530
4531 /*
4532 * Send BAR if required
4533 */
4534 if (ath_tx_tid_bar_tx_ready(sc, tid))
4535 ath_tx_tid_bar_tx(sc, tid);
4536
4537 ATH_TX_UNLOCK(sc);
4538
4539 /* Complete frames which errored out */
4540 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4541 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4542 ath_tx_default_comp(sc, bf, 0);
4543 }
4544 }
4545
4546 /*
4547 * Handle clean-up of packets from an aggregate list.
4548 *
4549 * There's no need to update the BAW here - the session is being
4550 * torn down.
4551 */
4552 static void
4553 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4554 {
4555 struct ath_buf *bf, *bf_next;
4556 struct ieee80211_node *ni = bf_first->bf_node;
4557 struct ath_node *an = ATH_NODE(ni);
4558 int tid = bf_first->bf_state.bfs_tid;
4559 struct ath_tid *atid = &an->an_tid[tid];
4560
4561 ATH_TX_LOCK(sc);
4562
4563 /* update incomp */
4564 bf = bf_first;
4565 while (bf) {
4566 atid->incomp--;
4567 bf = bf->bf_next;
4568 }
4569
4570 if (atid->incomp == 0) {
4571 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4572 "%s: TID %d: cleaned up! resume!\n",
4573 __func__, tid);
4574 atid->cleanup_inprogress = 0;
4575 ath_tx_tid_resume(sc, atid);
4576 }
4577
4578 /* Send BAR if required */
4579 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4580 /*
4581 * XXX TODO: we should likely just tear down the BAR state here,
4582 * rather than sending a BAR.
4583 */
4584 if (ath_tx_tid_bar_tx_ready(sc, atid))
4585 ath_tx_tid_bar_tx(sc, atid);
4586
4587 ATH_TX_UNLOCK(sc);
4588
4589 /* Handle frame completion */
4590 bf = bf_first;
4591 while (bf) {
4592 bf_next = bf->bf_next;
4593 ath_tx_default_comp(sc, bf, 1);
4594 bf = bf_next;
4595 }
4596 }
4597
4598 /*
4599 * Handle completion of a set of aggregate frames.
4600 *
4601 * Note: the completion handler is the last descriptor in the aggregate,
4602 * not the last descriptor in the first frame.
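 * That's because the hardware only writes a valid TX status into
 * the final descriptor of the whole aggregate chain.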
4603 */
4604 static void
4605 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4606 int fail)
4607 {
4608 //struct ath_desc *ds = bf->bf_lastds;
4609 struct ieee80211_node *ni = bf_first->bf_node;
4610 struct ath_node *an = ATH_NODE(ni);
4611 int tid = bf_first->bf_state.bfs_tid;
4612 struct ath_tid *atid = &an->an_tid[tid];
4613 struct ath_tx_status ts;
4614 struct ieee80211_tx_ampdu *tap;
4615 ath_bufhead bf_q;
4616 ath_bufhead bf_cq;
4617 int seq_st, tx_ok;
4618 int hasba, isaggr;
4619 uint32_t ba[2];
4620 struct ath_buf *bf, *bf_next;
4621 int ba_index;
4622 int drops = 0;
4623 int nframes = 0, nbad = 0, nf;
4624 int pktlen;
4625 /* XXX there's too much on the stack? */
4626 struct ath_rc_series rc[ATH_RC_NUM];
4627 int txseq;
4628
4629 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4630 __func__, atid->hwq_depth);
4631
4632 /*
4633 * Take a copy; this may be needed -after- bf_first
4634 * has been completed and freed.
4635 */
4636 ts = bf_first->bf_status.ds_txstat;
4637
4638 TAILQ_INIT(&bf_q);
4639 TAILQ_INIT(&bf_cq);
4640
4641 /* The TID state is kept behind the TXQ lock */
4642 ATH_TX_LOCK(sc);
4643
4644 atid->hwq_depth--;
4645 if (atid->hwq_depth < 0)
4646 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4647 __func__, atid->hwq_depth);
4648
4649 /*
4650 * If the TID is filtered, handle completing the filter
4651 * transition before potentially kicking it to the cleanup
4652 * function.
4653 *
4654 * XXX this is duplicate work, ew.
4655 */
4656 if (atid->isfiltered)
4657 ath_tx_tid_filt_comp_complete(sc, atid);
4658
4659 /*
4660 * Punt cleanup to the relevant function, not our problem now
4661 */
4662 if (atid->cleanup_inprogress) {
4663 if (atid->isfiltered)
4664 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4665 "%s: isfiltered=1, normal_comp?\n",
4666 __func__);
4667 ATH_TX_UNLOCK(sc);
4668 ath_tx_comp_cleanup_aggr(sc, bf_first);
4669 return;
4670 }
4671
4672 /*
4673 * If the frame is filtered, transition to filtered frame
4674 * mode and add this to the filtered frame list.
4675 *
4676 * XXX TODO: figure out how this interoperates with
4677 * BAR, pause and cleanup states.
4678 */
4679 if ((ts.ts_status & HAL_TXERR_FILT) ||
4680 (ts.ts_status != 0 && atid->isfiltered)) {
4681 if (fail != 0)
4682 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4683 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4684 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4685
4686 /* Remove from BAW */
4687 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4688 if (bf->bf_state.bfs_addedbaw)
4689 drops++;
4690 if (bf->bf_state.bfs_dobaw) {
4691 ath_tx_update_baw(sc, an, atid, bf);
4692 if (!bf->bf_state.bfs_addedbaw)
4693 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4694 "%s: wasn't added: seqno %d\n",
4695 __func__,
4696 SEQNO(bf->bf_state.bfs_seqno));
4697 }
4698 bf->bf_state.bfs_dobaw = 0;
4699 }
4700 /*
4701 * If any intermediate frames in the BAW were dropped when
4702 * handling filtering things, send a BAR.
4703 */
4704 if (drops)
4705 ath_tx_tid_bar_suspend(sc, atid);
4706
4707 /*
4708 * Finish up by sending a BAR if required and freeing
4709 * the frames outside of the TX lock.
4710 */
4711 goto finish_send_bar;
4712 }
4713
4714 /*
4715 * XXX for now, use the first frame in the aggregate for
4716 * XXX rate control completion; it's at least consistent.
4717 */
4718 pktlen = bf_first->bf_state.bfs_pktlen;
4719
4720 /*
4721 * Handle errors first!
4722 *
4723 * Here, handle _any_ error as an "exceeded retries" error.
4724 * Later on (when filtered frames are to be specially handled) 4725 * it'll have to be expanded. 4726 */ 4727 #if 0 4728 if (ts.ts_status & HAL_TXERR_XRETRY) { 4729 #endif 4730 if (ts.ts_status != 0) { 4731 ATH_TX_UNLOCK(sc); 4732 ath_tx_comp_aggr_error(sc, bf_first, atid); 4733 return; 4734 } 4735 4736 tap = ath_tx_get_tx_tid(an, tid); 4737 4738 /* 4739 * extract starting sequence and block-ack bitmap 4740 */ 4741 /* XXX endian-ness of seq_st, ba? */ 4742 seq_st = ts.ts_seqnum; 4743 hasba = !! (ts.ts_flags & HAL_TX_BA); 4744 tx_ok = (ts.ts_status == 0); 4745 isaggr = bf_first->bf_state.bfs_aggr; 4746 ba[0] = ts.ts_ba_low; 4747 ba[1] = ts.ts_ba_high; 4748 4749 /* 4750 * Copy the TX completion status and the rate control 4751 * series from the first descriptor, as it may be freed 4752 * before the rate control code can get its grubby fingers 4753 * into things. 4754 */ 4755 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4756 4757 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4758 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4759 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4760 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4761 isaggr, seq_st, hasba, ba[0], ba[1]); 4762 4763 /* 4764 * The reference driver doesn't do this; it simply ignores 4765 * this check in its entirety. 4766 * 4767 * I've seen this occur when using iperf to send traffic 4768 * out tid 1 - the aggregate frames are all marked as TID 1, 4769 * but the TXSTATUS has TID=0. So, let's just ignore this 4770 * check. 4771 */ 4772 #if 0 4773 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4774 if (tid != ts.ts_tid) { 4775 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4776 __func__, tid, ts.ts_tid); 4777 tx_ok = 0; 4778 } 4779 #endif 4780 4781 /* AR5416 BA bug; this requires an interface reset */ 4782 if (isaggr && tx_ok && (! hasba)) { 4783 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4784 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4785 "seq_st=%d\n", 4786 __func__, hasba, tx_ok, isaggr, seq_st); 4787 /* XXX TODO: schedule an interface reset */ 4788 #ifdef ATH_DEBUG 4789 ath_printtxbuf(sc, bf_first, 4790 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 4791 #endif 4792 } 4793 4794 /* 4795 * Walk the list of frames, figure out which ones were correctly 4796 * sent and which weren't. 4797 */ 4798 bf = bf_first; 4799 nf = bf_first->bf_state.bfs_nframes; 4800 4801 /* bf_first is going to be invalid once this list is walked */ 4802 bf_first = NULL; 4803 4804 /* 4805 * Walk the list of completed frames and determine 4806 * which need to be completed and which need to be 4807 * retransmitted. 4808 * 4809 * For completed frames, the completion functions need 4810 * to be called at the end of this function as the last 4811 * node reference may free the node. 4812 * 4813 * Finally, since the TXQ lock can't be held during the 4814 * completion callback (to avoid lock recursion), 4815 * the completion calls have to be done outside of the 4816 * lock. 
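 *
 * The per-frame decision below is driven by the block-ack bitmap;
 * in sketch form:
 *
 *	ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
 *	if (tx_ok && ATH_BA_ISSET(ba, ba_index))
 *		(completed - move to bf_cq)
 *	else
 *		(retry via ath_tx_retry_subframe(), or drop)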
4817 */ 4818 while (bf) { 4819 nframes++; 4820 ba_index = ATH_BA_INDEX(seq_st, 4821 SEQNO(bf->bf_state.bfs_seqno)); 4822 bf_next = bf->bf_next; 4823 bf->bf_next = NULL; /* Remove it from the aggr list */ 4824 4825 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4826 "%s: checking bf=%p seqno=%d; ack=%d\n", 4827 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4828 ATH_BA_ISSET(ba, ba_index)); 4829 4830 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4831 sc->sc_stats.ast_tx_aggr_ok++; 4832 ath_tx_update_baw(sc, an, atid, bf); 4833 bf->bf_state.bfs_dobaw = 0; 4834 if (!bf->bf_state.bfs_addedbaw) 4835 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4836 "%s: wasn't added: seqno %d\n", 4837 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4838 bf->bf_next = NULL; 4839 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4840 } else { 4841 sc->sc_stats.ast_tx_aggr_fail++; 4842 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4843 drops++; 4844 bf->bf_next = NULL; 4845 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4846 } 4847 nbad++; 4848 } 4849 bf = bf_next; 4850 } 4851 4852 /* 4853 * Now that the BAW updates have been done, unlock 4854 * 4855 * txseq is grabbed before the lock is released so we 4856 * have a consistent view of what -was- in the BAW. 4857 * Anything after this point will not yet have been 4858 * TXed. 4859 */ 4860 txseq = tap->txa_start; 4861 ATH_TX_UNLOCK(sc); 4862 4863 if (nframes != nf) 4864 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4865 "%s: num frames seen=%d; bf nframes=%d\n", 4866 __func__, nframes, nf); 4867 4868 /* 4869 * Now we know how many frames were bad, call the rate 4870 * control code. 4871 */ 4872 if (fail == 0) 4873 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 4874 nbad); 4875 4876 /* 4877 * send bar if we dropped any frames 4878 */ 4879 if (drops) { 4880 /* Suspend the TX queue and get ready to send the BAR */ 4881 ATH_TX_LOCK(sc); 4882 ath_tx_tid_bar_suspend(sc, atid); 4883 ATH_TX_UNLOCK(sc); 4884 } 4885 4886 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4887 "%s: txa_start now %d\n", __func__, tap->txa_start); 4888 4889 ATH_TX_LOCK(sc); 4890 4891 /* Prepend all frames to the beginning of the queue */ 4892 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4893 TAILQ_REMOVE(&bf_q, bf, bf_list); 4894 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4895 } 4896 4897 /* 4898 * Reschedule to grab some further frames. 4899 */ 4900 ath_tx_tid_sched(sc, atid); 4901 4902 /* 4903 * If the queue is filtered, re-schedule as required. 4904 * 4905 * This is required as there may be a subsequent TX descriptor 4906 * for this end-node that has CLRDMASK set, so it's quite possible 4907 * that a filtered frame will be followed by a non-filtered 4908 * (complete or otherwise) frame. 4909 * 4910 * XXX should we do this before we complete the frame? 4911 */ 4912 if (atid->isfiltered) 4913 ath_tx_tid_filt_comp_complete(sc, atid); 4914 4915 finish_send_bar: 4916 4917 /* 4918 * Send BAR if required 4919 */ 4920 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4921 ath_tx_tid_bar_tx(sc, atid); 4922 4923 ATH_TX_UNLOCK(sc); 4924 4925 /* Do deferred completion */ 4926 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4927 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4928 ath_tx_default_comp(sc, bf, 0); 4929 } 4930 } 4931 4932 /* 4933 * Handle completion of unaggregated frames in an ADDBA 4934 * session. 4935 * 4936 * Fail is set to 1 if the entry is being freed via a call to 4937 * ath_tx_draintxq(). 
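 *
 * In that case the rate control update is skipped and the frame is
 * simply completed with the failure flag set.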
4938 */ 4939 static void 4940 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 4941 { 4942 struct ieee80211_node *ni = bf->bf_node; 4943 struct ath_node *an = ATH_NODE(ni); 4944 int tid = bf->bf_state.bfs_tid; 4945 struct ath_tid *atid = &an->an_tid[tid]; 4946 struct ath_tx_status ts; 4947 int drops = 0; 4948 4949 /* 4950 * Take a copy of this; filtering/cloning the frame may free the 4951 * bf pointer. 4952 */ 4953 ts = bf->bf_status.ds_txstat; 4954 4955 /* 4956 * Update rate control status here, before we possibly 4957 * punt to retry or cleanup. 4958 * 4959 * Do it outside of the TXQ lock. 4960 */ 4961 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4962 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4963 &bf->bf_status.ds_txstat, 4964 bf->bf_state.bfs_pktlen, 4965 1, (ts.ts_status == 0) ? 0 : 1); 4966 4967 /* 4968 * This is called early so atid->hwq_depth can be tracked. 4969 * This unfortunately means that it's released and regrabbed 4970 * during retry and cleanup. That's rather inefficient. 4971 */ 4972 ATH_TX_LOCK(sc); 4973 4974 if (tid == IEEE80211_NONQOS_TID) 4975 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 4976 4977 DPRINTF(sc, ATH_DEBUG_SW_TX, 4978 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 4979 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 4980 SEQNO(bf->bf_state.bfs_seqno)); 4981 4982 atid->hwq_depth--; 4983 if (atid->hwq_depth < 0) 4984 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4985 __func__, atid->hwq_depth); 4986 4987 /* 4988 * If the TID is filtered, handle completing the filter 4989 * transition before potentially kicking it to the cleanup 4990 * function. 4991 */ 4992 if (atid->isfiltered) 4993 ath_tx_tid_filt_comp_complete(sc, atid); 4994 4995 /* 4996 * If a cleanup is in progress, punt to comp_cleanup; 4997 * rather than handling it here. It's thus their 4998 * responsibility to clean up, call the completion 4999 * function in net80211, etc. 5000 */ 5001 if (atid->cleanup_inprogress) { 5002 if (atid->isfiltered) 5003 DPRINTF(sc, ATH_DEBUG_SW_TX, 5004 "%s: isfiltered=1, normal_comp?\n", 5005 __func__); 5006 ATH_TX_UNLOCK(sc); 5007 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5008 __func__); 5009 ath_tx_comp_cleanup_unaggr(sc, bf); 5010 return; 5011 } 5012 5013 /* 5014 * XXX TODO: how does cleanup, BAR and filtered frame handling 5015 * overlap? 5016 * 5017 * If the frame is filtered OR if it's any failure but 5018 * the TID is filtered, the frame must be added to the 5019 * filtered frame list. 5020 * 5021 * However - a busy buffer can't be added to the filtered 5022 * list as it will end up being recycled without having 5023 * been made available for the hardware. 5024 */ 5025 if ((ts.ts_status & HAL_TXERR_FILT) || 5026 (ts.ts_status != 0 && atid->isfiltered)) { 5027 int freeframe; 5028 5029 if (fail != 0) 5030 DPRINTF(sc, ATH_DEBUG_SW_TX, 5031 "%s: isfiltered=1, fail=%d\n", 5032 __func__, fail); 5033 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5034 if (freeframe) { 5035 /* Remove from BAW */ 5036 if (bf->bf_state.bfs_addedbaw) 5037 drops++; 5038 if (bf->bf_state.bfs_dobaw) { 5039 ath_tx_update_baw(sc, an, atid, bf); 5040 if (!bf->bf_state.bfs_addedbaw) 5041 DPRINTF(sc, ATH_DEBUG_SW_TX, 5042 "%s: wasn't added: seqno %d\n", 5043 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5044 } 5045 bf->bf_state.bfs_dobaw = 0; 5046 } 5047 5048 /* 5049 * If the frame couldn't be filtered, treat it as a drop and 5050 * prepare to send a BAR. 
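 * (Here, freeframe means the buffer was not moved onto the filtered
 * frame list, so it's completed below once the lock is dropped.)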
5051 */
5052 if (freeframe && drops)
5053 ath_tx_tid_bar_suspend(sc, atid);
5054
5055 /*
5056 * Send BAR if required
5057 */
5058 if (ath_tx_tid_bar_tx_ready(sc, atid))
5059 ath_tx_tid_bar_tx(sc, atid);
5060
5061 ATH_TX_UNLOCK(sc);
5062 /*
5063 * If freeframe is set, then the frame couldn't be
5064 * cloned and bf is still valid. Just complete/free it.
5065 */
5066 if (freeframe)
5067 ath_tx_default_comp(sc, bf, fail);
5068
5069
5070 return;
5071 }
5072 /*
5073 * Don't bother with the retry check if all frames
5074 * are being failed (eg during queue deletion.)
5075 */
5076 #if 0
5077 if (fail == 0 && ts.ts_status & HAL_TXERR_XRETRY) {
5078 #endif
5079 if (fail == 0 && ts.ts_status != 0) {
5080 ATH_TX_UNLOCK(sc);
5081 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5082 __func__);
5083 ath_tx_aggr_retry_unaggr(sc, bf);
5084 return;
5085 }
5086
5087 /* Success? Complete */
5088 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5089 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5090 if (bf->bf_state.bfs_dobaw) {
5091 ath_tx_update_baw(sc, an, atid, bf);
5092 bf->bf_state.bfs_dobaw = 0;
5093 if (!bf->bf_state.bfs_addedbaw)
5094 DPRINTF(sc, ATH_DEBUG_SW_TX,
5095 "%s: wasn't added: seqno %d\n",
5096 __func__, SEQNO(bf->bf_state.bfs_seqno));
5097 }
5098
5099 /*
5100 * If the queue is filtered, re-schedule as required.
5101 *
5102 * This is required as there may be a subsequent TX descriptor
5103 * for this end-node that has CLRDMASK set, so it's quite possible
5104 * that a filtered frame will be followed by a non-filtered
5105 * (complete or otherwise) frame.
5106 *
5107 * XXX should we do this before we complete the frame?
5108 */
5109 if (atid->isfiltered)
5110 ath_tx_tid_filt_comp_complete(sc, atid);
5111
5112 /*
5113 * Send BAR if required
5114 */
5115 if (ath_tx_tid_bar_tx_ready(sc, atid))
5116 ath_tx_tid_bar_tx(sc, atid);
5117
5118 ATH_TX_UNLOCK(sc);
5119
5120 ath_tx_default_comp(sc, bf, fail);
5121 /* bf is freed at this point */
5122 }
5123
5124 void
5125 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5126 {
5127 if (bf->bf_state.bfs_aggr)
5128 ath_tx_aggr_comp_aggr(sc, bf, fail);
5129 else
5130 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5131 }
5132
5133 /*
5134 * Schedule some packets from the given node/TID to the hardware.
5135 *
5136 * This is the aggregate version.
5137 */
5138 void
5139 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5140 struct ath_tid *tid)
5141 {
5142 struct ath_buf *bf;
5143 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5144 struct ieee80211_tx_ampdu *tap;
5145 ATH_AGGR_STATUS status;
5146 ath_bufhead bf_q;
5147
5148 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5149 ATH_TX_LOCK_ASSERT(sc);
5150
5151 /*
5152 * XXX TODO: If we're called for a queue that we're leaking frames to,
5153 * ensure we only leak one.
5154 */
5155
5156 tap = ath_tx_get_tx_tid(an, tid->tid);
5157
5158 if (tid->tid == IEEE80211_NONQOS_TID)
5159 DPRINTF(sc, ATH_DEBUG_SW_TX,
5160 "%s: called for TID=NONQOS_TID?\n", __func__);
5161
5162 for (;;) {
5163 status = ATH_AGGR_DONE;
5164
5165 /*
5166 * If the upper layer has paused the TID, don't
5167 * queue any further packets.
5168 *
5169 * This can also occur from the completion task because
5170 * of packet loss; but as it's serialised with this code,
5171 * it won't "appear" half way through queuing packets.
5172 */
5173 if (!
ath_tx_tid_can_tx_or_sched(sc, tid)) 5174 break; 5175 5176 bf = ATH_TID_FIRST(tid); 5177 if (bf == NULL) { 5178 break; 5179 } 5180 5181 /* 5182 * If the packet doesn't fall within the BAW (eg a NULL 5183 * data frame), schedule it directly; continue. 5184 */ 5185 if (! bf->bf_state.bfs_dobaw) { 5186 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5187 "%s: non-baw packet\n", 5188 __func__); 5189 ATH_TID_REMOVE(tid, bf, bf_list); 5190 5191 if (bf->bf_state.bfs_nframes > 1) 5192 DPRINTF(sc, ATH_DEBUG_SW_TX, 5193 "%s: aggr=%d, nframes=%d\n", 5194 __func__, 5195 bf->bf_state.bfs_aggr, 5196 bf->bf_state.bfs_nframes); 5197 5198 /* 5199 * This shouldn't happen - such frames shouldn't 5200 * ever have been queued as an aggregate in the 5201 * first place. However, make sure the fields 5202 * are correctly setup just to be totally sure. 5203 */ 5204 bf->bf_state.bfs_aggr = 0; 5205 bf->bf_state.bfs_nframes = 1; 5206 5207 /* Update CLRDMASK just before this frame is queued */ 5208 ath_tx_update_clrdmask(sc, tid, bf); 5209 5210 ath_tx_do_ratelookup(sc, bf); 5211 ath_tx_calc_duration(sc, bf); 5212 ath_tx_calc_protection(sc, bf); 5213 ath_tx_set_rtscts(sc, bf); 5214 ath_tx_rate_fill_rcflags(sc, bf); 5215 ath_tx_setds(sc, bf); 5216 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5217 5218 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 5219 5220 /* Queue the packet; continue */ 5221 goto queuepkt; 5222 } 5223 5224 TAILQ_INIT(&bf_q); 5225 5226 /* 5227 * Do a rate control lookup on the first frame in the 5228 * list. The rate control code needs that to occur 5229 * before it can determine whether to TX. 5230 * It's inaccurate because the rate control code doesn't 5231 * really "do" aggregate lookups, so it only considers 5232 * the size of the first frame. 5233 */ 5234 ath_tx_do_ratelookup(sc, bf); 5235 bf->bf_state.bfs_rc[3].rix = 0; 5236 bf->bf_state.bfs_rc[3].tries = 0; 5237 5238 ath_tx_calc_duration(sc, bf); 5239 ath_tx_calc_protection(sc, bf); 5240 5241 ath_tx_set_rtscts(sc, bf); 5242 ath_tx_rate_fill_rcflags(sc, bf); 5243 5244 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 5245 5246 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5247 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 5248 5249 /* 5250 * No frames to be picked up - out of BAW 5251 */ 5252 if (TAILQ_EMPTY(&bf_q)) 5253 break; 5254 5255 /* 5256 * This assumes that the descriptor list in the ath_bufhead 5257 * are already linked together via bf_next pointers. 5258 */ 5259 bf = TAILQ_FIRST(&bf_q); 5260 5261 if (status == ATH_AGGR_8K_LIMITED) 5262 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 5263 5264 /* 5265 * If it's the only frame send as non-aggregate 5266 * assume that ath_tx_form_aggr() has checked 5267 * whether it's in the BAW and added it appropriately. 
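 *
 * (It still needs its aggregate state cleared - bfs_aggr,
 * bfs_ndelim - and the 11n aggregate descriptor bits unset;
 * that's what the single-frame branch below does.)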
5268 */ 5269 if (bf->bf_state.bfs_nframes == 1) { 5270 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5271 "%s: single-frame aggregate\n", __func__); 5272 5273 /* Update CLRDMASK just before this frame is queued */ 5274 ath_tx_update_clrdmask(sc, tid, bf); 5275 5276 bf->bf_state.bfs_aggr = 0; 5277 bf->bf_state.bfs_ndelim = 0; 5278 ath_tx_setds(sc, bf); 5279 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5280 if (status == ATH_AGGR_BAW_CLOSED) 5281 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 5282 else 5283 sc->sc_aggr_stats.aggr_single_pkt++; 5284 } else { 5285 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5286 "%s: multi-frame aggregate: %d frames, " 5287 "length %d\n", 5288 __func__, bf->bf_state.bfs_nframes, 5289 bf->bf_state.bfs_al); 5290 bf->bf_state.bfs_aggr = 1; 5291 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 5292 sc->sc_aggr_stats.aggr_aggr_pkt++; 5293 5294 /* Update CLRDMASK just before this frame is queued */ 5295 ath_tx_update_clrdmask(sc, tid, bf); 5296 5297 /* 5298 * Calculate the duration/protection as required. 5299 */ 5300 ath_tx_calc_duration(sc, bf); 5301 ath_tx_calc_protection(sc, bf); 5302 5303 /* 5304 * Update the rate and rtscts information based on the 5305 * rate decision made by the rate control code; 5306 * the first frame in the aggregate needs it. 5307 */ 5308 ath_tx_set_rtscts(sc, bf); 5309 5310 /* 5311 * Setup the relevant descriptor fields 5312 * for aggregation. The first descriptor 5313 * already points to the rest in the chain. 5314 */ 5315 ath_tx_setds_11n(sc, bf); 5316 5317 } 5318 queuepkt: 5319 /* Set completion handler, multi-frame aggregate or not */ 5320 bf->bf_comp = ath_tx_aggr_comp; 5321 5322 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 5323 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); 5324 5325 /* 5326 * Update leak count and frame config if were leaking frames. 5327 * 5328 * XXX TODO: it should update all frames in an aggregate 5329 * correctly! 5330 */ 5331 ath_tx_leak_count_update(sc, tid, bf); 5332 5333 /* Punt to txq */ 5334 ath_tx_handoff(sc, txq, bf); 5335 5336 /* Track outstanding buffer count to hardware */ 5337 /* aggregates are "one" buffer */ 5338 tid->hwq_depth++; 5339 5340 /* 5341 * Break out if ath_tx_form_aggr() indicated 5342 * there can't be any further progress (eg BAW is full.) 5343 * Checking for an empty txq is done above. 5344 * 5345 * XXX locking on txq here? 5346 */ 5347 /* XXX TXQ locking */ 5348 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || 5349 (status == ATH_AGGR_BAW_CLOSED || 5350 status == ATH_AGGR_LEAK_CLOSED)) 5351 break; 5352 } 5353 } 5354 5355 /* 5356 * Schedule some packets from the given node/TID to the hardware. 5357 * 5358 * XXX TODO: this routine doesn't enforce the maximum TXQ depth. 5359 * It just dumps frames into the TXQ. We should limit how deep 5360 * the transmit queue can grow for frames dispatched to the given 5361 * TXQ. 5362 * 5363 * To avoid locking issues, either we need to own the TXQ lock 5364 * at this point, or we need to pass in the maximum frame count 5365 * from the caller. 5366 */ 5367 void 5368 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 5369 struct ath_tid *tid) 5370 { 5371 struct ath_buf *bf; 5372 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5373 5374 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 5375 __func__, an, tid->tid); 5376 5377 ATH_TX_LOCK_ASSERT(sc); 5378 5379 /* Check - is AMPDU pending or running? 
then print out something */ 5380 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5381 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5382 __func__, tid->tid); 5383 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5384 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5385 __func__, tid->tid); 5386 5387 for (;;) { 5388 5389 /* 5390 * If the upper layers have paused the TID, don't 5391 * queue any further packets. 5392 * 5393 * XXX if we are leaking frames, make sure we decrement 5394 * that counter _and_ we continue here. 5395 */ 5396 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5397 break; 5398 5399 bf = ATH_TID_FIRST(tid); 5400 if (bf == NULL) { 5401 break; 5402 } 5403 5404 ATH_TID_REMOVE(tid, bf, bf_list); 5405 5406 /* Sanity check! */ 5407 if (tid->tid != bf->bf_state.bfs_tid) { 5408 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5409 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5410 tid->tid); 5411 } 5412 /* Normal completion handler */ 5413 bf->bf_comp = ath_tx_normal_comp; 5414 5415 /* 5416 * Override this for now, until the non-aggregate 5417 * completion handler correctly handles software retransmits. 5418 */ 5419 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5420 5421 /* Update CLRDMASK just before this frame is queued */ 5422 ath_tx_update_clrdmask(sc, tid, bf); 5423 5424 /* Program descriptors + rate control */ 5425 ath_tx_do_ratelookup(sc, bf); 5426 ath_tx_calc_duration(sc, bf); 5427 ath_tx_calc_protection(sc, bf); 5428 ath_tx_set_rtscts(sc, bf); 5429 ath_tx_rate_fill_rcflags(sc, bf); 5430 ath_tx_setds(sc, bf); 5431 5432 /* 5433 * Update the current leak count if 5434 * we're leaking frames; and set the 5435 * MORE flag as appropriate. 5436 */ 5437 ath_tx_leak_count_update(sc, tid, bf); 5438 5439 /* Track outstanding buffer count to hardware */ 5440 /* aggregates are "one" buffer */ 5441 tid->hwq_depth++; 5442 5443 /* Punt to hardware or software txq */ 5444 ath_tx_handoff(sc, txq, bf); 5445 } 5446 } 5447 5448 /* 5449 * Schedule some packets to the given hardware queue. 5450 * 5451 * This function walks the list of TIDs (ie, ath_node TIDs 5452 * with queued traffic) and attempts to schedule traffic 5453 * from them. 5454 * 5455 * TID scheduling is implemented as a FIFO, with TIDs being 5456 * added to the end of the queue after some frames have been 5457 * scheduled. 5458 */ 5459 void 5460 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5461 { 5462 struct ath_tid *tid, *next, *last; 5463 5464 ATH_TX_LOCK_ASSERT(sc); 5465 5466 /* 5467 * Don't schedule if the hardware queue is busy. 5468 * This (hopefully) gives some more time to aggregate 5469 * some packets in the aggregation queue. 5470 * 5471 * XXX It doesn't stop a parallel sender from sneaking 5472 * in transmitting a frame! 5473 */ 5474 /* XXX TXQ locking */ 5475 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5476 sc->sc_aggr_stats.aggr_sched_nopkt++; 5477 return; 5478 } 5479 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5480 sc->sc_aggr_stats.aggr_sched_nopkt++; 5481 return; 5482 } 5483 5484 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5485 5486 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5487 /* 5488 * Suspend paused queues here; they'll be resumed 5489 * once the addba completes or times out. 5490 */ 5491 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5492 __func__, tid->tid, tid->paused); 5493 ath_tx_tid_unsched(sc, tid); 5494 /* 5495 * This node may be in power-save and we're leaking 5496 * a frame; be careful. 5497 */ 5498 if (! 
ath_tx_tid_can_tx_or_sched(sc, tid)) { 5499 continue; 5500 } 5501 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5502 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5503 else 5504 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5505 5506 /* Not empty? Re-schedule */ 5507 if (tid->axq_depth != 0) 5508 ath_tx_tid_sched(sc, tid); 5509 5510 /* 5511 * Give the software queue time to aggregate more 5512 * packets. If we aren't running aggregation then 5513 * we should still limit the hardware queue depth. 5514 */ 5515 /* XXX TXQ locking */ 5516 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5517 break; 5518 } 5519 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5520 break; 5521 } 5522 5523 /* 5524 * If this was the last entry on the original list, stop. 5525 * Otherwise nodes that have been rescheduled onto the end 5526 * of the TID FIFO list will just keep being rescheduled. 5527 * 5528 * XXX What should we do about nodes that were paused 5529 * but are pending a leaking frame in response to a ps-poll? 5530 * They'll be put at the front of the list; so they'll 5531 * prematurely trigger this condition! Ew. 5532 */ 5533 if (tid == last) 5534 break; 5535 } 5536 } 5537 5538 /* 5539 * TX addba handling 5540 */ 5541 5542 /* 5543 * Return net80211 TID struct pointer, or NULL for none 5544 */ 5545 struct ieee80211_tx_ampdu * 5546 ath_tx_get_tx_tid(struct ath_node *an, int tid) 5547 { 5548 struct ieee80211_node *ni = &an->an_node; 5549 struct ieee80211_tx_ampdu *tap; 5550 5551 if (tid == IEEE80211_NONQOS_TID) 5552 return NULL; 5553 5554 tap = &ni->ni_tx_ampdu[tid]; 5555 return tap; 5556 } 5557 5558 /* 5559 * Is AMPDU-TX running? 5560 */ 5561 static int 5562 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5563 { 5564 struct ieee80211_tx_ampdu *tap; 5565 5566 if (tid == IEEE80211_NONQOS_TID) 5567 return 0; 5568 5569 tap = ath_tx_get_tx_tid(an, tid); 5570 if (tap == NULL) 5571 return 0; /* Not valid; default to not running */ 5572 5573 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5574 } 5575 5576 /* 5577 * Is AMPDU-TX negotiation pending? 5578 */ 5579 static int 5580 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5581 { 5582 struct ieee80211_tx_ampdu *tap; 5583 5584 if (tid == IEEE80211_NONQOS_TID) 5585 return 0; 5586 5587 tap = ath_tx_get_tx_tid(an, tid); 5588 if (tap == NULL) 5589 return 0; /* Not valid; default to not pending */ 5590 5591 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5592 } 5593 5594 /* 5595 * Is AMPDU-TX pending for the given TID? 5596 */ 5597 5598 5599 /* 5600 * Method to handle sending an ADDBA request. 5601 * 5602 * We tap this so the relevant flags can be set to pause the TID 5603 * whilst waiting for the response. 5604 * 5605 * XXX there's no timeout handler we can override? 5606 */ 5607 int 5608 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5609 int dialogtoken, int baparamset, int batimeout) 5610 { 5611 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5612 int tid = tap->txa_tid; 5613 struct ath_node *an = ATH_NODE(ni); 5614 struct ath_tid *atid = &an->an_tid[tid]; 5615 5616 /* 5617 * XXX danger Will Robinson! 5618 * 5619 * Although the taskqueue may be running and scheduling some more 5620 * packets, these should all be _before_ the addba sequence number. 5621 * However, net80211 will keep self-assigning sequence numbers 5622 * until addba has been negotiated. 
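 *
 * For example, if frames with seqnos 5..9 went out before the
 * exchange completed, they were never tagged for BAW tracking;
 * once the session starts they can't slide the window along,
 * and traffic queued behind them stalls.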
5623 * 5624 * In the past, these packets would be "paused" (which still works 5625 * fine, as they're being scheduled to the driver in the same 5626 * serialised method which is calling the addba request routine) 5627 * and when the aggregation session begins, they'll be dequeued 5628 * as aggregate packets and added to the BAW. However, now there's 5629 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5630 * packets. Thus they never get included in the BAW tracking and 5631 * this can cause the initial burst of packets after the addba 5632 * negotiation to "hang", as they quickly fall outside the BAW. 5633 * 5634 * The "eventual" solution should be to tag these packets with 5635 * dobaw. Although net80211 has given us a sequence number, 5636 * it'll be "after" the left edge of the BAW and thus it'll 5637 * fall within it. 5638 */ 5639 ATH_TX_LOCK(sc); 5640 /* 5641 * This is a bit annoying. Until net80211 HT code inherits some 5642 * (any) locking, we may have this called in parallel BUT only 5643 * one response/timeout will be called. Grr. 5644 */ 5645 if (atid->addba_tx_pending == 0) { 5646 ath_tx_tid_pause(sc, atid); 5647 atid->addba_tx_pending = 1; 5648 } 5649 ATH_TX_UNLOCK(sc); 5650 5651 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5652 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5653 __func__, 5654 ni->ni_macaddr, 5655 ":", 5656 dialogtoken, baparamset, batimeout); 5657 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5658 "%s: txa_start=%d, ni_txseqs=%d\n", 5659 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5660 5661 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5662 batimeout); 5663 } 5664 5665 /* 5666 * Handle an ADDBA response. 5667 * 5668 * We unpause the queue so TX'ing can resume. 5669 * 5670 * Any packets TX'ed from this point should be "aggregate" (whether 5671 * aggregate or not) so the BAW is updated. 5672 * 5673 * Note! net80211 keeps self-assigning sequence numbers until 5674 * ampdu is negotiated. This means the initially-negotiated BAW left 5675 * edge won't match the ni->ni_txseq. 5676 * 5677 * So, being very dirty, the BAW left edge is "slid" here to match 5678 * ni->ni_txseq. 5679 * 5680 * What likely SHOULD happen is that all packets subsequent to the 5681 * addba request should be tagged as aggregate and queued as non-aggregate 5682 * frames; thus updating the BAW. For now though, I'll just slide the 5683 * window. 5684 */ 5685 int 5686 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5687 int status, int code, int batimeout) 5688 { 5689 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5690 int tid = tap->txa_tid; 5691 struct ath_node *an = ATH_NODE(ni); 5692 struct ath_tid *atid = &an->an_tid[tid]; 5693 int r; 5694 5695 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5696 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, 5697 ni->ni_macaddr, 5698 ":", 5699 status, code, batimeout); 5700 5701 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5702 "%s: txa_start=%d, ni_txseqs=%d\n", 5703 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5704 5705 /* 5706 * Call this first, so the interface flags get updated 5707 * before the TID is unpaused. Otherwise a race condition 5708 * exists where the unpaused TID still doesn't yet have 5709 * IEEE80211_AGGR_RUNNING set. 5710 */ 5711 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5712 5713 ATH_TX_LOCK(sc); 5714 atid->addba_tx_pending = 0; 5715 /* 5716 * XXX dirty! 5717 * Slide the BAW left edge to wherever net80211 left it for us. 5718 * Read above for more information. 
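 *
 * The slide is just the single assignment below; afterwards the
 * BAW left edge matches whatever net80211 last self-assigned.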

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Unblock the pending BAR held traffic, if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path.  This quietens
		 * the warning.  It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	/*
	 * Unpause the TID if no cleanup is required.
	 */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		ath_tx_tid_pause(sc, tid);
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: TID %d: cleaning up TID\n",
		    __func__,
		    an->an_node.ni_macaddr,
		    ":",
		    i);
		ath_tx_tid_cleanup(sc, an, i, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! tid->cleanup_inprogress)
			ath_tx_tid_resume(sc, tid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
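
#if 0
/*
 * Sketch only (not compiled): the completion-queue drain loop above
 * appears in both ath_addba_stop() and ath_tx_node_reassoc(); it
 * could plausibly be factored out as below.  The helper name is
 * hypothetical.
 */
static void
ath_tx_comp_cq_fail(struct ath_softc *sc, ath_bufhead *bf_cq)
{
	struct ath_buf *bf;

	/* Complete each buffer with fail=1 so the frames are freed */
	while ((bf = TAILQ_FIRST(bf_cq)) != NULL) {
		TAILQ_REMOVE(bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
#endif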

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It will, however, call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts);

	/* Note: This may update the BAW details */
	sc->sc_bar_response(ni, tap, status);

	/*
	 * Unpause the TID.
	 *
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch?  (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; this reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}
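
#if 0
/*
 * Usage sketch (hypothetical caller, not compiled):
 * ath_tx_node_is_asleep() asserts the TX lock, so both the check and
 * whatever decision depends on it must sit under ATH_TX_LOCK, or the
 * answer can race a sleep/wake transition.
 */
static void
ath_tx_example_ps_check(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK(sc);
	if (ath_tx_node_is_asleep(sc, an)) {
		/* e.g. queue the frame to the software TID queue */
	} else {
		/* e.g. direct-dispatch to the hardware queue */
	}
	ATH_TX_UNLOCK(sc);
}
#endif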

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* !? */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
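
#if 0
/*
 * Sketch only (not compiled): the sc_tx method table filled in above
 * is how alternate TX DMA implementations hook in.  An EDMA-style
 * path would provide its own setup routine plugging in its own
 * descriptor/status sizes and methods (cf. the EDMA TX code in
 * if_ath_tx_edma.c).  All names and values below are illustrative,
 * not the real EDMA parameters.
 */
void
ath_xmit_setup_example_edma(struct ath_softc *sc)
{

	/*
	 * Hypothetical values: EDMA-style chips use larger TX
	 * descriptors and a separate TX status format.
	 */
	sc->sc_tx_desclen = 96;
	sc->sc_tx_statuslen = 32;
	sc->sc_tx_nmaps = 4;	/* e.g. multiple buffers per descriptor */

	sc->sc_tx.xmit_setup = ath_example_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_example_dma_txteardown;
	/* ... remaining methods, as in ath_xmit_setup_legacy() above ... */
}
#endif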