/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the BE queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame. Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources. For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors. We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
	struct ath_buf *bf, int is_aggr, int is_first_subframe,
	int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
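	 *
	 * An illustrative example (the numbers are invented, not taken
	 * from real traffic): with numTxMaps = 4 and bf_nseg = 6, the
	 * loop below packs the segments as
	 *
	 *	desc[0]: buf[0..3] = seg[0..3], link -> desc[1]
	 *	desc[1]: buf[0..1] = seg[4..5], link -> 0 (end of frame)
	 *
	 * On pre-EDMA chips numTxMaps is 1, so this degenerates into
	 * the classic one-segment-per-descriptor chain.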
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point. It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id. These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , bf->bf_state.bfs_tx_queue
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames, yet it'll be called on every
		 * sub-frame. Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
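 *
 * As a sketch of what the four-entry series means (the specific
 * rates and try counts here are illustrative, not values the code
 * chooses):
 *
 *	rc[0] = { primary rate,     tries = 4 }
 *	rc[1] = { first fallback,   tries = 4 }
 *	rc[2] = { second fallback,  tries = 4 }
 *	rc[3] = { last-resort rate, tries = 8 }
 *
 * With bfs_ismrr clear, the tries for rc[1..3] are zeroed below and
 * only the primary rate is ever attempted.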
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		);

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true. For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info. But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor? Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion. That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
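	 *
	 * (If it isn't, the EDMA descriptor's QCU ID will name the
	 * wrong hardware queue; the check below only logs the
	 * mismatch rather than fixing it up.)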
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware. Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX We should instead just verify that sc_txstart_cnt
	 * or sc_txproc_cnt > 0. That would mean that
	 * the reset is going to be waiting for us to complete.
	 */
	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
		device_printf(sc->sc_dev,
		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
		    __func__);
	}

	/*
	 * XXX .. this is going to cause the hardware to get upset;
	 * so we really should find some way to drop or queue
	 * things.
	 */

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
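	 *
	 * (Concretely: bf->bf_daddr is written into whatever link
	 * field axq_link currently names - the holding descriptor's
	 * if the list was empty, otherwise the tail descriptor's -
	 * so the DMA engine can walk onto this new frame.)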
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops after the last transmitted descriptor.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
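 *
 * (It simply returns below when the queue turns out to be empty;
 * the point is that callers shouldn't skip it based on their own
 * idea of the queue state.)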
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued. Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index. When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ieee80211com *ic = &sc->sc_ic;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS. But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
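	 *
	 * As an illustrative sketch of the fragment handling below:
	 *
	 *	dur  = SIFS + ACK		(for this fragment)
	 *	dur += SIFS + ACK		(more-frag: doubled)
	 *	dur += txtime(next fragment)	(so the NAV covers it)
	 *
	 * The final fragment of a burst carries only the single
	 * SIFS + ACK time.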
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly. The last fragment uses only
			 * the ACK duration
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame. We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
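 *
 * A sketch of the flow implemented below (names as in this file):
 *
 *	rix = bfs_doprot ? sc_protrix : bfs_rc[0].rix
 *	cix = bfs_ctsrate0 ? ath_tx_findrix(bfs_ctsrate0)
 *			   : rt->info[rix].controlRate
 *	ctsrate = rt->info[cix].rateCode (plus short-preamble bit)
 *	pre-11n only: ctsduration = [SIFS+CTS] + txtime + [SIFS+ACK]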
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct. Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
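 *
 * (ath_tx_start() currently papers over this by rewriting
 * bfs_tx_queue to the CAB queue's QCU ID before handing a frame
 * to the per-VAP mcast queue; see below.)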
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets.)
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
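 *
 * A summary of the decision implemented below:
 *
 *	BAR, station awake	-> direct dispatch
 *	BAR, station asleep	-> software queue, at the head
 *	mgmt/ctl, asleep	-> direct dispatch
 *	anything else		-> software queue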
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else {
		return (1);
	}
}

/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
1532 * 1533 * All frames get mostly the same treatment and it's done once. 1534 * Retransmits fiddle with things like the rate control setup, 1535 * setting the retransmit bit in the packet; doing relevant DMA/bus 1536 * syncing and relinking it (back) into the hardware TX queue. 1537 * 1538 * Note that this may cause the mbuf to be reallocated, so 1539 * m0 may not be valid. 1540 */ 1541 static int 1542 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1543 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1544 { 1545 struct ieee80211vap *vap = ni->ni_vap; 1546 struct ath_hal *ah = sc->sc_ah; 1547 struct ieee80211com *ic = &sc->sc_ic; 1548 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1549 int error, iswep, ismcast, isfrag, ismrr; 1550 int keyix, hdrlen, pktlen, try0 = 0; 1551 u_int8_t rix = 0, txrate = 0; 1552 struct ath_desc *ds; 1553 struct ieee80211_frame *wh; 1554 u_int subtype, flags; 1555 HAL_PKT_TYPE atype; 1556 const HAL_RATE_TABLE *rt; 1557 HAL_BOOL shortPreamble; 1558 struct ath_node *an; 1559 u_int pri; 1560 1561 /* 1562 * To ensure that both sequence numbers and the CCMP PN handling 1563 * is "correct", make sure that the relevant TID queue is locked. 1564 * Otherwise the CCMP PN and seqno may appear out of order, causing 1565 * re-ordered frames to have out of order CCMP PN's, resulting 1566 * in many, many frame drops. 1567 */ 1568 ATH_TX_LOCK_ASSERT(sc); 1569 1570 wh = mtod(m0, struct ieee80211_frame *); 1571 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 1572 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1573 isfrag = m0->m_flags & M_FRAG; 1574 hdrlen = ieee80211_anyhdrsize(wh); 1575 /* 1576 * Packet length must not include any 1577 * pad bytes; deduct them here. 1578 */ 1579 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1580 1581 /* Handle encryption twiddling if needed */ 1582 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1583 &pktlen, &keyix)) { 1584 ath_freetx(m0); 1585 return EIO; 1586 } 1587 1588 /* packet header may have moved, reset our local pointer */ 1589 wh = mtod(m0, struct ieee80211_frame *); 1590 1591 pktlen += IEEE80211_CRC_LEN; 1592 1593 /* 1594 * Load the DMA map so any coalescing is done. This 1595 * also calculates the number of descriptors we need. 1596 */ 1597 error = ath_tx_dmasetup(sc, bf, m0); 1598 if (error != 0) 1599 return error; 1600 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 1601 bf->bf_node = ni; /* NB: held reference */ 1602 m0 = bf->bf_m; /* NB: may have changed */ 1603 wh = mtod(m0, struct ieee80211_frame *); 1604 1605 /* setup descriptors */ 1606 ds = bf->bf_desc; 1607 rt = sc->sc_currates; 1608 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1609 1610 /* 1611 * NB: the 802.11 layer marks whether or not we should 1612 * use short preamble based on the current mode and 1613 * negotiated parameters. 1614 */ 1615 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1616 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1617 shortPreamble = AH_TRUE; 1618 sc->sc_stats.ast_tx_shortpre++; 1619 } else { 1620 shortPreamble = AH_FALSE; 1621 } 1622 1623 an = ATH_NODE(ni); 1624 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1625 flags = 0; 1626 ismrr = 0; /* default no multi-rate retry*/ 1627 pri = M_WME_GETAC(m0); /* honor classification */ 1628 /* XXX use txparams instead of fixed values */ 1629 /* 1630 * Calculate Atheros packet type from IEEE80211 packet header, 1631 * setup for rate calculations, and select h/w transmit queue. 
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX? too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16. TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future. There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue. That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames. Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
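	 *
	 * (Hence rc[0] is seeded here even when the real lookup is
	 * deferred to ath_tx_do_ratelookup() via bfs_doratelookup.)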
1814 */ 1815 bf->bf_state.bfs_rc[0].rix = rix; 1816 bf->bf_state.bfs_rc[0].tries = try0; 1817 bf->bf_state.bfs_rc[0].ratecode = txrate; 1818 1819 /* Store the decided rate index values away */ 1820 bf->bf_state.bfs_pktlen = pktlen; 1821 bf->bf_state.bfs_hdrlen = hdrlen; 1822 bf->bf_state.bfs_atype = atype; 1823 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1824 bf->bf_state.bfs_txrate0 = txrate; 1825 bf->bf_state.bfs_try0 = try0; 1826 bf->bf_state.bfs_keyix = keyix; 1827 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1828 bf->bf_state.bfs_txflags = flags; 1829 bf->bf_state.bfs_shpream = shortPreamble; 1830 1831 /* XXX this should be done in ath_tx_setrate() */ 1832 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1833 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1834 bf->bf_state.bfs_ctsduration = 0; 1835 bf->bf_state.bfs_ismrr = ismrr; 1836 1837 return 0; 1838 } 1839 1840 /* 1841 * Queue a frame to the hardware or software queue. 1842 * 1843 * This can be called by the net80211 code. 1844 * 1845 * XXX what about locking? Or, push the seqno assign into the 1846 * XXX aggregate scheduler so its serialised? 1847 * 1848 * XXX When sending management frames via ath_raw_xmit(), 1849 * should CLRDMASK be set unconditionally? 1850 */ 1851 int 1852 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1853 struct ath_buf *bf, struct mbuf *m0) 1854 { 1855 struct ieee80211vap *vap = ni->ni_vap; 1856 struct ath_vap *avp = ATH_VAP(vap); 1857 int r = 0; 1858 u_int pri; 1859 int tid; 1860 struct ath_txq *txq; 1861 int ismcast; 1862 const struct ieee80211_frame *wh; 1863 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1864 ieee80211_seq seqno; 1865 uint8_t type, subtype; 1866 int queue_to_head; 1867 1868 ATH_TX_LOCK_ASSERT(sc); 1869 1870 /* 1871 * Determine the target hardware queue. 1872 * 1873 * For multicast frames, the txq gets overridden appropriately 1874 * depending upon the state of PS. 1875 * 1876 * For any other frame, we do a TID/QoS lookup inside the frame 1877 * to see what the TID should be. If it's a non-QoS frame, the 1878 * AC and TID are overridden. The TID/TXQ code assumes the 1879 * TID is on a predictable hardware TXQ, so we don't support 1880 * having a node TID queued to multiple hardware TXQs. 1881 * This may change in the future but would require some locking 1882 * fudgery. 1883 */ 1884 pri = ath_tx_getac(sc, m0); 1885 tid = ath_tx_gettid(sc, m0); 1886 1887 txq = sc->sc_ac2q[pri]; 1888 wh = mtod(m0, struct ieee80211_frame *); 1889 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1890 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1891 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1892 1893 /* 1894 * Enforce how deep the multicast queue can grow. 1895 * 1896 * XXX duplicated in ath_raw_xmit(). 1897 */ 1898 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1899 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1900 > sc->sc_txq_mcastq_maxdepth) { 1901 sc->sc_stats.ast_tx_mcastq_overflow++; 1902 m_freem(m0); 1903 return (ENOBUFS); 1904 } 1905 } 1906 1907 /* 1908 * Enforce how deep the unicast queue can grow. 1909 * 1910 * If the node is in power save then we don't want 1911 * the software queue to grow too deep, or a node may 1912 * end up consuming all of the ath_buf entries. 1913 * 1914 * For now, only do this for DATA frames. 1915 * 1916 * We will want to cap how many management/control 1917 * frames get punted to the software queue so it doesn't 1918 * fill up. But the correct solution isn't yet obvious. 
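	 * (One hypothetical approach: a separate, smaller cap for
	 * queued management/control frames, akin to the
	 * sc_txq_node_psq_maxdepth limit applied to data frames below.)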
1919 	 * In any case, this check should at least let frames pass
1920 	 * that we are direct-dispatching.
1921 	 *
1922 	 * XXX TODO: duplicate this to the raw xmit path!
1923 	 */
1924 	if (type == IEEE80211_FC0_TYPE_DATA &&
1925 	    ATH_NODE(ni)->an_is_powersave &&
1926 	    ATH_NODE(ni)->an_swq_depth >
1927 	    sc->sc_txq_node_psq_maxdepth) {
1928 		sc->sc_stats.ast_tx_node_psq_overflow++;
1929 		m_freem(m0);
1930 		return (ENOBUFS);
1931 	}
1932 
1933 	/* A-MPDU TX */
1934 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1935 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1936 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1937 
1938 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1939 	    __func__, tid, pri, is_ampdu);
1940 
1941 	/* Set local packet state, used to queue packets to hardware */
1942 	bf->bf_state.bfs_tid = tid;
1943 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1944 	bf->bf_state.bfs_pri = pri;
1945 
1946 #if 1
1947 	/*
1948 	 * When servicing one or more stations in power-save mode,
1949 	 * or if there is some mcast data waiting on the mcast
1950 	 * queue (to prevent out of order delivery), multicast frames
1951 	 * must be buffered until after the beacon.
1952 	 *
1953 	 * TODO: we should lock the mcastq before we check the length.
1954 	 */
1955 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1956 		txq = &avp->av_mcastq;
1957 		/*
1958 		 * Mark the frame as eventually belonging on the CAB
1959 		 * queue, so the descriptor setup functions will
1960 		 * correctly initialise the descriptor 'qcuId' field.
1961 		 */
1962 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1963 	}
1964 #endif
1965 
1966 	/* Do the generic frame setup */
1967 	/* XXX should just bzero the bf_state? */
1968 	bf->bf_state.bfs_dobaw = 0;
1969 
1970 	/* A-MPDU TX? Manually set sequence number */
1971 	/*
1972 	 * Don't do it whilst pending; the net80211 layer still
1973 	 * assigns them.
1974 	 */
1975 	if (is_ampdu_tx) {
1976 		/*
1977 		 * Always call; this function will
1978 		 * handle making sure that null data frames
1979 		 * don't get a sequence number from the current
1980 		 * TID and thus mess with the BAW.
1981 		 */
1982 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1983 
1984 		/*
1985 		 * Don't add QoS NULL frames to the BAW.
1986 		 */
1987 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
1988 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1989 			bf->bf_state.bfs_dobaw = 1;
1990 		}
1991 	}
1992 
1993 	/*
1994 	 * If a sequence number was needed, it has been assigned by now.
1995 	 * Squirrel it away somewhere easy to get to.
1996 	 */
1997 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
1998 
1999 	/* Is ampdu pending? Fetch the seqno and print it out */
2000 	if (is_ampdu_pending)
2001 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2002 		    "%s: tid %d: ampdu pending, seqno %d\n",
2003 		    __func__, tid, M_SEQNO_GET(m0));
2004 
2005 	/* This also sets up the DMA map */
2006 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2007 
2008 	if (r != 0)
2009 		goto done;
2010 
2011 	/* At this point m0 could have changed! */
2012 	m0 = bf->bf_m;
2013 
2014 #if 1
2015 	/*
2016 	 * If it's a multicast frame, do a direct-dispatch to the
2017 	 * destination hardware queue.  Don't bother software
2018 	 * queuing it.
2019 	 */
2020 	/*
2021 	 * If it's a BAR frame, do a direct dispatch to the
2022 	 * destination hardware queue.  Don't bother software
2023 	 * queuing it, as the TID will now be paused.
2024 	 * Sending a BAR frame can occur from the net80211 txa timer
2025 	 * (ie, retries) or from the ath txtask (completion call.)
2026 * It queues directly to hardware because the TID is paused 2027 * at this point (and won't be unpaused until the BAR has 2028 * either been TXed successfully or max retries has been 2029 * reached.) 2030 */ 2031 /* 2032 * Until things are better debugged - if this node is asleep 2033 * and we're sending it a non-BAR frame, direct dispatch it. 2034 * Why? Because we need to figure out what's actually being 2035 * sent - eg, during reassociation/reauthentication after 2036 * the node (last) disappeared whilst asleep, the driver should 2037 * have unpaused/unsleep'ed the node. So until that is 2038 * sorted out, use this workaround. 2039 */ 2040 if (txq == &avp->av_mcastq) { 2041 DPRINTF(sc, ATH_DEBUG_SW_TX, 2042 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2043 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2044 ath_tx_xmit_normal(sc, txq, bf); 2045 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2046 &queue_to_head)) { 2047 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2048 } else { 2049 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2050 ath_tx_xmit_normal(sc, txq, bf); 2051 } 2052 #else 2053 /* 2054 * For now, since there's no software queue, 2055 * direct-dispatch to the hardware. 2056 */ 2057 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2058 /* 2059 * Update the current leak count if 2060 * we're leaking frames; and set the 2061 * MORE flag as appropriate. 2062 */ 2063 ath_tx_leak_count_update(sc, tid, bf); 2064 ath_tx_xmit_normal(sc, txq, bf); 2065 #endif 2066 done: 2067 return 0; 2068 } 2069 2070 static int 2071 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2072 struct ath_buf *bf, struct mbuf *m0, 2073 const struct ieee80211_bpf_params *params) 2074 { 2075 struct ieee80211com *ic = &sc->sc_ic; 2076 struct ath_hal *ah = sc->sc_ah; 2077 struct ieee80211vap *vap = ni->ni_vap; 2078 int error, ismcast, ismrr; 2079 int keyix, hdrlen, pktlen, try0, txantenna; 2080 u_int8_t rix, txrate; 2081 struct ieee80211_frame *wh; 2082 u_int flags; 2083 HAL_PKT_TYPE atype; 2084 const HAL_RATE_TABLE *rt; 2085 struct ath_desc *ds; 2086 u_int pri; 2087 int o_tid = -1; 2088 int do_override; 2089 uint8_t type, subtype; 2090 int queue_to_head; 2091 struct ath_node *an = ATH_NODE(ni); 2092 2093 ATH_TX_LOCK_ASSERT(sc); 2094 2095 wh = mtod(m0, struct ieee80211_frame *); 2096 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2097 hdrlen = ieee80211_anyhdrsize(wh); 2098 /* 2099 * Packet length must not include any 2100 * pad bytes; deduct them here. 2101 */ 2102 /* XXX honor IEEE80211_BPF_DATAPAD */ 2103 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2104 2105 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2106 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2107 2108 ATH_KTR(sc, ATH_KTR_TX, 2, 2109 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2110 2111 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2112 __func__, ismcast); 2113 2114 pri = params->ibp_pri & 3; 2115 /* Override pri if the frame isn't a QoS one */ 2116 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2117 pri = ath_tx_getac(sc, m0); 2118 2119 /* XXX If it's an ADDBA, override the correct queue */ 2120 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2121 2122 /* Map ADDBA to the correct priority */ 2123 if (do_override) { 2124 #if 0 2125 DPRINTF(sc, ATH_DEBUG_XMIT, 2126 "%s: overriding tid %d pri %d -> %d\n", 2127 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2128 #endif 2129 pri = TID_TO_WME_AC(o_tid); 2130 } 2131 2132 /* Handle encryption twiddling if needed */ 2133 if (! 
ath_tx_tag_crypto(sc, ni, 2134 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2135 &hdrlen, &pktlen, &keyix)) { 2136 ath_freetx(m0); 2137 return EIO; 2138 } 2139 /* packet header may have moved, reset our local pointer */ 2140 wh = mtod(m0, struct ieee80211_frame *); 2141 2142 /* Do the generic frame setup */ 2143 /* XXX should just bzero the bf_state? */ 2144 bf->bf_state.bfs_dobaw = 0; 2145 2146 error = ath_tx_dmasetup(sc, bf, m0); 2147 if (error != 0) 2148 return error; 2149 m0 = bf->bf_m; /* NB: may have changed */ 2150 wh = mtod(m0, struct ieee80211_frame *); 2151 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2152 bf->bf_node = ni; /* NB: held reference */ 2153 2154 /* Always enable CLRDMASK for raw frames for now.. */ 2155 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2156 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2157 if (params->ibp_flags & IEEE80211_BPF_RTS) 2158 flags |= HAL_TXDESC_RTSENA; 2159 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2160 /* XXX assume 11g/11n protection? */ 2161 bf->bf_state.bfs_doprot = 1; 2162 flags |= HAL_TXDESC_CTSENA; 2163 } 2164 /* XXX leave ismcast to injector? */ 2165 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2166 flags |= HAL_TXDESC_NOACK; 2167 2168 rt = sc->sc_currates; 2169 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2170 2171 /* Fetch first rate information */ 2172 rix = ath_tx_findrix(sc, params->ibp_rate0); 2173 try0 = params->ibp_try0; 2174 2175 /* 2176 * Override EAPOL rate as appropriate. 2177 */ 2178 if (m0->m_flags & M_EAPOL) { 2179 /* XXX? maybe always use long preamble? */ 2180 rix = an->an_mgmtrix; 2181 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2182 } 2183 2184 txrate = rt->info[rix].rateCode; 2185 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2186 txrate |= rt->info[rix].shortPreamble; 2187 sc->sc_txrix = rix; 2188 ismrr = (params->ibp_try1 != 0); 2189 txantenna = params->ibp_pri >> 2; 2190 if (txantenna == 0) /* XXX? */ 2191 txantenna = sc->sc_txantenna; 2192 2193 /* 2194 * Since ctsrate is fixed, store it away for later 2195 * use when the descriptor fields are being set. 2196 */ 2197 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2198 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2199 2200 /* 2201 * NB: we mark all packets as type PSPOLL so the h/w won't 2202 * set the sequence number, duration, etc. 2203 */ 2204 atype = HAL_PKT_TYPE_PSPOLL; 2205 2206 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2207 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2208 sc->sc_hwmap[rix].ieeerate, -1); 2209 2210 if (ieee80211_radiotap_active_vap(vap)) { 2211 u_int64_t tsf = ath_hal_gettsf64(ah); 2212 2213 sc->sc_tx_th.wt_tsf = htole64(tsf); 2214 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2215 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2216 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2217 if (m0->m_flags & M_FRAG) 2218 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2219 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2220 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2221 ieee80211_get_node_txpower(ni)); 2222 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2223 2224 ieee80211_radiotap_tx(vap, m0); 2225 } 2226 2227 /* 2228 * Formulate first tx descriptor with tx controls. 2229 */ 2230 ds = bf->bf_desc; 2231 /* XXX check return value? 
*/ 2232 2233 /* Store the decided rate index values away */ 2234 bf->bf_state.bfs_pktlen = pktlen; 2235 bf->bf_state.bfs_hdrlen = hdrlen; 2236 bf->bf_state.bfs_atype = atype; 2237 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2238 ieee80211_get_node_txpower(ni)); 2239 bf->bf_state.bfs_txrate0 = txrate; 2240 bf->bf_state.bfs_try0 = try0; 2241 bf->bf_state.bfs_keyix = keyix; 2242 bf->bf_state.bfs_txantenna = txantenna; 2243 bf->bf_state.bfs_txflags = flags; 2244 bf->bf_state.bfs_shpream = 2245 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2246 2247 /* Set local packet state, used to queue packets to hardware */ 2248 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2249 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2250 bf->bf_state.bfs_pri = pri; 2251 2252 /* XXX this should be done in ath_tx_setrate() */ 2253 bf->bf_state.bfs_ctsrate = 0; 2254 bf->bf_state.bfs_ctsduration = 0; 2255 bf->bf_state.bfs_ismrr = ismrr; 2256 2257 /* Blank the legacy rate array */ 2258 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2259 2260 bf->bf_state.bfs_rc[0].rix = rix; 2261 bf->bf_state.bfs_rc[0].tries = try0; 2262 bf->bf_state.bfs_rc[0].ratecode = txrate; 2263 2264 if (ismrr) { 2265 int rix; 2266 2267 rix = ath_tx_findrix(sc, params->ibp_rate1); 2268 bf->bf_state.bfs_rc[1].rix = rix; 2269 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2270 2271 rix = ath_tx_findrix(sc, params->ibp_rate2); 2272 bf->bf_state.bfs_rc[2].rix = rix; 2273 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2274 2275 rix = ath_tx_findrix(sc, params->ibp_rate3); 2276 bf->bf_state.bfs_rc[3].rix = rix; 2277 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2278 } 2279 /* 2280 * All the required rate control decisions have been made; 2281 * fill in the rc flags. 2282 */ 2283 ath_tx_rate_fill_rcflags(sc, bf); 2284 2285 /* NB: no buffered multicast in power save support */ 2286 2287 /* 2288 * If we're overiding the ADDBA destination, dump directly 2289 * into the hardware queue, right after any pending 2290 * frames to that node are. 2291 */ 2292 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2293 __func__, do_override); 2294 2295 #if 1 2296 /* 2297 * Put addba frames in the right place in the right TID/HWQ. 2298 */ 2299 if (do_override) { 2300 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2301 /* 2302 * XXX if it's addba frames, should we be leaking 2303 * them out via the frame leak method? 2304 * XXX for now let's not risk it; but we may wish 2305 * to investigate this later. 2306 */ 2307 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2308 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2309 &queue_to_head)) { 2310 /* Queue to software queue */ 2311 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2312 } else { 2313 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2314 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2315 } 2316 #else 2317 /* Direct-dispatch to the hardware */ 2318 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2319 /* 2320 * Update the current leak count if 2321 * we're leaking frames; and set the 2322 * MORE flag as appropriate. 2323 */ 2324 ath_tx_leak_count_update(sc, tid, bf); 2325 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2326 #endif 2327 return 0; 2328 } 2329 2330 /* 2331 * Send a raw frame. 2332 * 2333 * This can be called by net80211. 
2334 */ 2335 int 2336 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2337 const struct ieee80211_bpf_params *params) 2338 { 2339 struct ieee80211com *ic = ni->ni_ic; 2340 struct ath_softc *sc = ic->ic_softc; 2341 struct ath_buf *bf; 2342 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2343 int error = 0; 2344 2345 ATH_PCU_LOCK(sc); 2346 if (sc->sc_inreset_cnt > 0) { 2347 DPRINTF(sc, ATH_DEBUG_XMIT, 2348 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2349 error = EIO; 2350 ATH_PCU_UNLOCK(sc); 2351 goto badbad; 2352 } 2353 sc->sc_txstart_cnt++; 2354 ATH_PCU_UNLOCK(sc); 2355 2356 /* Wake the hardware up already */ 2357 ATH_LOCK(sc); 2358 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2359 ATH_UNLOCK(sc); 2360 2361 ATH_TX_LOCK(sc); 2362 2363 if (!sc->sc_running || sc->sc_invalid) { 2364 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", 2365 __func__, sc->sc_running, sc->sc_invalid); 2366 m_freem(m); 2367 error = ENETDOWN; 2368 goto bad; 2369 } 2370 2371 /* 2372 * Enforce how deep the multicast queue can grow. 2373 * 2374 * XXX duplicated in ath_tx_start(). 2375 */ 2376 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2377 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2378 > sc->sc_txq_mcastq_maxdepth) { 2379 sc->sc_stats.ast_tx_mcastq_overflow++; 2380 error = ENOBUFS; 2381 } 2382 2383 if (error != 0) { 2384 m_freem(m); 2385 goto bad; 2386 } 2387 } 2388 2389 /* 2390 * Grab a TX buffer and associated resources. 2391 */ 2392 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2393 if (bf == NULL) { 2394 sc->sc_stats.ast_tx_nobuf++; 2395 m_freem(m); 2396 error = ENOBUFS; 2397 goto bad; 2398 } 2399 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2400 m, params, bf); 2401 2402 if (params == NULL) { 2403 /* 2404 * Legacy path; interpret frame contents to decide 2405 * precisely how to send the frame. 2406 */ 2407 if (ath_tx_start(sc, ni, bf, m)) { 2408 error = EIO; /* XXX */ 2409 goto bad2; 2410 } 2411 } else { 2412 /* 2413 * Caller supplied explicit parameters to use in 2414 * sending the frame. 2415 */ 2416 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2417 error = EIO; /* XXX */ 2418 goto bad2; 2419 } 2420 } 2421 sc->sc_wd_timer = 5; 2422 sc->sc_stats.ast_tx_raw++; 2423 2424 /* 2425 * Update the TIM - if there's anything queued to the 2426 * software queue and power save is enabled, we should 2427 * set the TIM. 
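	 * (The '1' argument below means "enable" - the TIM bit for this
	 * station should be set if anything is buffered for it.)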
2428 */ 2429 ath_tx_update_tim(sc, ni, 1); 2430 2431 ATH_TX_UNLOCK(sc); 2432 2433 ATH_PCU_LOCK(sc); 2434 sc->sc_txstart_cnt--; 2435 ATH_PCU_UNLOCK(sc); 2436 2437 2438 /* Put the hardware back to sleep if required */ 2439 ATH_LOCK(sc); 2440 ath_power_restore_power_state(sc); 2441 ATH_UNLOCK(sc); 2442 2443 return 0; 2444 2445 bad2: 2446 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2447 "bf=%p", 2448 m, 2449 params, 2450 bf); 2451 ATH_TXBUF_LOCK(sc); 2452 ath_returnbuf_head(sc, bf); 2453 ATH_TXBUF_UNLOCK(sc); 2454 2455 bad: 2456 ATH_TX_UNLOCK(sc); 2457 2458 ATH_PCU_LOCK(sc); 2459 sc->sc_txstart_cnt--; 2460 ATH_PCU_UNLOCK(sc); 2461 2462 /* Put the hardware back to sleep if required */ 2463 ATH_LOCK(sc); 2464 ath_power_restore_power_state(sc); 2465 ATH_UNLOCK(sc); 2466 2467 badbad: 2468 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2469 m, params); 2470 sc->sc_stats.ast_tx_raw_fail++; 2471 ieee80211_free_node(ni); 2472 2473 return error; 2474 } 2475 2476 /* Some helper functions */ 2477 2478 /* 2479 * ADDBA (and potentially others) need to be placed in the same 2480 * hardware queue as the TID/node it's relating to. This is so 2481 * it goes out after any pending non-aggregate frames to the 2482 * same node/TID. 2483 * 2484 * If this isn't done, the ADDBA can go out before the frames 2485 * queued in hardware. Even though these frames have a sequence 2486 * number -earlier- than the ADDBA can be transmitted (but 2487 * no frames whose sequence numbers are after the ADDBA should 2488 * be!) they'll arrive after the ADDBA - and the receiving end 2489 * will simply drop them as being out of the BAW. 2490 * 2491 * The frames can't be appended to the TID software queue - it'll 2492 * never be sent out. So these frames have to be directly 2493 * dispatched to the hardware, rather than queued in software. 2494 * So if this function returns true, the TXQ has to be 2495 * overridden and it has to be directly dispatched. 2496 * 2497 * It's a dirty hack, but someone's gotta do it. 2498 */ 2499 2500 /* 2501 * XXX doesn't belong here! 2502 */ 2503 static int 2504 ieee80211_is_action(struct ieee80211_frame *wh) 2505 { 2506 /* Type: Management frame? */ 2507 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2508 IEEE80211_FC0_TYPE_MGT) 2509 return 0; 2510 2511 /* Subtype: Action frame? */ 2512 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2513 IEEE80211_FC0_SUBTYPE_ACTION) 2514 return 0; 2515 2516 return 1; 2517 } 2518 2519 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2520 /* 2521 * Return an alternate TID for ADDBA request frames. 2522 * 2523 * Yes, this likely should be done in the net80211 layer. 2524 */ 2525 static int 2526 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2527 struct ieee80211_node *ni, 2528 struct mbuf *m0, int *tid) 2529 { 2530 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2531 struct ieee80211_action_ba_addbarequest *ia; 2532 uint8_t *frm; 2533 uint16_t baparamset; 2534 2535 /* Not action frame? Bail */ 2536 if (! ieee80211_is_action(wh)) 2537 return 0; 2538 2539 /* XXX Not needed for frames we send? */ 2540 #if 0 2541 /* Correct length? */ 2542 if (! ieee80211_parse_action(ni, m)) 2543 return 0; 2544 #endif 2545 2546 /* Extract out action frame */ 2547 frm = (u_int8_t *)&wh[1]; 2548 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2549 2550 /* Not ADDBA? 
Bail */ 2551 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2552 return 0; 2553 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2554 return 0; 2555 2556 /* Extract TID, return it */ 2557 baparamset = le16toh(ia->rq_baparamset); 2558 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2559 2560 return 1; 2561 } 2562 #undef MS 2563 2564 /* Per-node software queue operations */ 2565 2566 /* 2567 * Add the current packet to the given BAW. 2568 * It is assumed that the current packet 2569 * 2570 * + fits inside the BAW; 2571 * + already has had a sequence number allocated. 2572 * 2573 * Since the BAW status may be modified by both the ath task and 2574 * the net80211/ifnet contexts, the TID must be locked. 2575 */ 2576 void 2577 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2578 struct ath_tid *tid, struct ath_buf *bf) 2579 { 2580 int index, cindex; 2581 struct ieee80211_tx_ampdu *tap; 2582 2583 ATH_TX_LOCK_ASSERT(sc); 2584 2585 if (bf->bf_state.bfs_isretried) 2586 return; 2587 2588 tap = ath_tx_get_tx_tid(an, tid->tid); 2589 2590 if (! bf->bf_state.bfs_dobaw) { 2591 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2592 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2593 __func__, SEQNO(bf->bf_state.bfs_seqno), 2594 tap->txa_start, tap->txa_wnd); 2595 } 2596 2597 if (bf->bf_state.bfs_addedbaw) 2598 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2599 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2600 "baw head=%d tail=%d\n", 2601 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2602 tap->txa_start, tap->txa_wnd, tid->baw_head, 2603 tid->baw_tail); 2604 2605 /* 2606 * Verify that the given sequence number is not outside of the 2607 * BAW. Complain loudly if that's the case. 2608 */ 2609 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2610 SEQNO(bf->bf_state.bfs_seqno))) { 2611 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2612 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2613 "baw head=%d tail=%d\n", 2614 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2615 tap->txa_start, tap->txa_wnd, tid->baw_head, 2616 tid->baw_tail); 2617 } 2618 2619 /* 2620 * ni->ni_txseqs[] is the currently allocated seqno. 2621 * the txa state contains the current baw start. 2622 */ 2623 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2624 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2625 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2626 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2627 "baw head=%d tail=%d\n", 2628 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2629 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2630 tid->baw_tail); 2631 2632 2633 #if 0 2634 assert(tid->tx_buf[cindex] == NULL); 2635 #endif 2636 if (tid->tx_buf[cindex] != NULL) { 2637 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2638 "%s: ba packet dup (index=%d, cindex=%d, " 2639 "head=%d, tail=%d)\n", 2640 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2641 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2642 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2643 __func__, 2644 tid->tx_buf[cindex], 2645 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2646 bf, 2647 SEQNO(bf->bf_state.bfs_seqno) 2648 ); 2649 } 2650 tid->tx_buf[cindex] = bf; 2651 2652 if (index >= ((tid->baw_tail - tid->baw_head) & 2653 (ATH_TID_MAX_BUFS - 1))) { 2654 tid->baw_tail = cindex; 2655 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2656 } 2657 } 2658 2659 /* 2660 * Flip the BAW buffer entry over from the existing one to the new one. 
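 * (That is: replace old_bf with new_bf in the tid->tx_buf[] slot
 * computed from the frame's sequence number.)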
2661 * 2662 * When software retransmitting a (sub-)frame, it is entirely possible that 2663 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2664 * In that instance the buffer is cloned and the new buffer is used for 2665 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2666 * tracking array to maintain consistency. 2667 */ 2668 static void 2669 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2670 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2671 { 2672 int index, cindex; 2673 struct ieee80211_tx_ampdu *tap; 2674 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2675 2676 ATH_TX_LOCK_ASSERT(sc); 2677 2678 tap = ath_tx_get_tx_tid(an, tid->tid); 2679 index = ATH_BA_INDEX(tap->txa_start, seqno); 2680 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2681 2682 /* 2683 * Just warn for now; if it happens then we should find out 2684 * about it. It's highly likely the aggregation session will 2685 * soon hang. 2686 */ 2687 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2688 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2689 "%s: retransmitted buffer" 2690 " has mismatching seqno's, BA session may hang.\n", 2691 __func__); 2692 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2693 "%s: old seqno=%d, new_seqno=%d\n", __func__, 2694 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); 2695 } 2696 2697 if (tid->tx_buf[cindex] != old_bf) { 2698 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2699 "%s: ath_buf pointer incorrect; " 2700 " has m BA session may hang.\n", __func__); 2701 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2702 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); 2703 } 2704 2705 tid->tx_buf[cindex] = new_bf; 2706 } 2707 2708 /* 2709 * seq_start - left edge of BAW 2710 * seq_next - current/next sequence number to allocate 2711 * 2712 * Since the BAW status may be modified by both the ath task and 2713 * the net80211/ifnet contexts, the TID must be locked. 2714 */ 2715 static void 2716 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2717 struct ath_tid *tid, const struct ath_buf *bf) 2718 { 2719 int index, cindex; 2720 struct ieee80211_tx_ampdu *tap; 2721 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2722 2723 ATH_TX_LOCK_ASSERT(sc); 2724 2725 tap = ath_tx_get_tx_tid(an, tid->tid); 2726 index = ATH_BA_INDEX(tap->txa_start, seqno); 2727 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2728 2729 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2730 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2731 "baw head=%d, tail=%d\n", 2732 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2733 cindex, tid->baw_head, tid->baw_tail); 2734 2735 /* 2736 * If this occurs then we have a big problem - something else 2737 * has slid tap->txa_start along without updating the BAW 2738 * tracking start/end pointers. Thus the TX BAW state is now 2739 * completely busted. 2740 * 2741 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2742 * it's quite possible that a cloned buffer is making its way 2743 * here and causing it to fire off. Disable TDMA for now. 2744 */ 2745 if (tid->tx_buf[cindex] != bf) { 2746 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2747 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2748 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 2749 tid->tx_buf[cindex], 2750 (tid->tx_buf[cindex] != NULL) ? 
2751 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); 2752 } 2753 2754 tid->tx_buf[cindex] = NULL; 2755 2756 while (tid->baw_head != tid->baw_tail && 2757 !tid->tx_buf[tid->baw_head]) { 2758 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2759 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2760 } 2761 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2762 "%s: tid=%d: baw is now %d:%d, baw head=%d\n", 2763 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); 2764 } 2765 2766 static void 2767 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, 2768 struct ath_buf *bf) 2769 { 2770 struct ieee80211_frame *wh; 2771 2772 ATH_TX_LOCK_ASSERT(sc); 2773 2774 if (tid->an->an_leak_count > 0) { 2775 wh = mtod(bf->bf_m, struct ieee80211_frame *); 2776 2777 /* 2778 * Update MORE based on the software/net80211 queue states. 2779 */ 2780 if ((tid->an->an_stack_psq > 0) 2781 || (tid->an->an_swq_depth > 0)) 2782 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 2783 else 2784 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; 2785 2786 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2787 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2788 __func__, 2789 tid->an->an_node.ni_macaddr, 2790 ":", 2791 tid->an->an_leak_count, 2792 tid->an->an_stack_psq, 2793 tid->an->an_swq_depth, 2794 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2795 2796 /* 2797 * Re-sync the underlying buffer. 2798 */ 2799 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2800 BUS_DMASYNC_PREWRITE); 2801 2802 tid->an->an_leak_count --; 2803 } 2804 } 2805 2806 static int 2807 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) 2808 { 2809 2810 ATH_TX_LOCK_ASSERT(sc); 2811 2812 if (tid->an->an_leak_count > 0) { 2813 return (1); 2814 } 2815 if (tid->paused) 2816 return (0); 2817 return (1); 2818 } 2819 2820 /* 2821 * Mark the current node/TID as ready to TX. 2822 * 2823 * This is done to make it easy for the software scheduler to 2824 * find which nodes have data to send. 2825 * 2826 * The TXQ lock must be held. 2827 */ 2828 void 2829 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2830 { 2831 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2832 2833 ATH_TX_LOCK_ASSERT(sc); 2834 2835 /* 2836 * If we are leaking out a frame to this destination 2837 * for PS-POLL, ensure that we allow scheduling to 2838 * occur. 2839 */ 2840 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 2841 return; /* paused, can't schedule yet */ 2842 2843 if (tid->sched) 2844 return; /* already scheduled */ 2845 2846 tid->sched = 1; 2847 2848 #if 0 2849 /* 2850 * If this is a sleeping node we're leaking to, given 2851 * it a higher priority. This is so bad for QoS it hurts. 2852 */ 2853 if (tid->an->an_leak_count) { 2854 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); 2855 } else { 2856 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2857 } 2858 #endif 2859 2860 /* 2861 * We can't do the above - it'll confuse the TXQ software 2862 * scheduler which will keep checking the _head_ TID 2863 * in the list to see if it has traffic. If we queue 2864 * a TID to the head of the list and it doesn't transmit, 2865 * we'll check it again. 2866 * 2867 * So, get the rest of this leaking frames support working 2868 * and reliable first and _then_ optimise it so they're 2869 * pushed out in front of any other pending software 2870 * queued nodes. 2871 */ 2872 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2873 } 2874 2875 /* 2876 * Mark the current node as no longer needing to be polled for 2877 * TX packets. 2878 * 2879 * The TXQ lock must be held. 
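 *
 * (Note: unscheduling only removes the TID from the per-TXQ TID
 * list; any frames still on the TID's software queue stay there
 * until the TID is scheduled again.)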
2880 */ 2881 static void 2882 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2883 { 2884 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2885 2886 ATH_TX_LOCK_ASSERT(sc); 2887 2888 if (tid->sched == 0) 2889 return; 2890 2891 tid->sched = 0; 2892 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2893 } 2894 2895 /* 2896 * Assign a sequence number manually to the given frame. 2897 * 2898 * This should only be called for A-MPDU TX frames. 2899 */ 2900 static ieee80211_seq 2901 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2902 struct ath_buf *bf, struct mbuf *m0) 2903 { 2904 struct ieee80211_frame *wh; 2905 int tid, pri; 2906 ieee80211_seq seqno; 2907 uint8_t subtype; 2908 2909 /* TID lookup */ 2910 wh = mtod(m0, struct ieee80211_frame *); 2911 pri = M_WME_GETAC(m0); /* honor classification */ 2912 tid = WME_AC_TO_TID(pri); 2913 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2914 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2915 2916 /* XXX Is it a control frame? Ignore */ 2917 2918 /* Does the packet require a sequence number? */ 2919 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2920 return -1; 2921 2922 ATH_TX_LOCK_ASSERT(sc); 2923 2924 /* 2925 * Is it a QOS NULL Data frame? Give it a sequence number from 2926 * the default TID (IEEE80211_NONQOS_TID.) 2927 * 2928 * The RX path of everything I've looked at doesn't include the NULL 2929 * data frame sequence number in the aggregation state updates, so 2930 * assigning it a sequence number there will cause a BAW hole on the 2931 * RX side. 2932 */ 2933 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2934 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2935 /* XXX no locking for this TID? This is a bit of a problem. */ 2936 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2937 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2938 } else { 2939 /* Manually assign sequence number */ 2940 seqno = ni->ni_txseqs[tid]; 2941 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2942 } 2943 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2944 M_SEQNO_SET(m0, seqno); 2945 2946 /* Return so caller can do something with it if needed */ 2947 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2948 return seqno; 2949 } 2950 2951 /* 2952 * Attempt to direct dispatch an aggregate frame to hardware. 2953 * If the frame is out of BAW, queue. 2954 * Otherwise, schedule it as a single frame. 2955 */ 2956 static void 2957 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2958 struct ath_txq *txq, struct ath_buf *bf) 2959 { 2960 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2961 struct ieee80211_tx_ampdu *tap; 2962 2963 ATH_TX_LOCK_ASSERT(sc); 2964 2965 tap = ath_tx_get_tx_tid(an, tid->tid); 2966 2967 /* paused? queue */ 2968 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2969 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2970 /* XXX don't sched - we're paused! */ 2971 return; 2972 } 2973 2974 /* outside baw? queue */ 2975 if (bf->bf_state.bfs_dobaw && 2976 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2977 SEQNO(bf->bf_state.bfs_seqno)))) { 2978 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2979 ath_tx_tid_sched(sc, tid); 2980 return; 2981 } 2982 2983 /* 2984 * This is a temporary check and should be removed once 2985 * all the relevant code paths have been fixed. 
2986 * 2987 * During aggregate retries, it's possible that the head 2988 * frame will fail (which has the bfs_aggr and bfs_nframes 2989 * fields set for said aggregate) and will be retried as 2990 * a single frame. In this instance, the values should 2991 * be reset or the completion code will get upset with you. 2992 */ 2993 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2994 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 2995 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 2996 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 2997 bf->bf_state.bfs_aggr = 0; 2998 bf->bf_state.bfs_nframes = 1; 2999 } 3000 3001 /* Update CLRDMASK just before this frame is queued */ 3002 ath_tx_update_clrdmask(sc, tid, bf); 3003 3004 /* Direct dispatch to hardware */ 3005 ath_tx_do_ratelookup(sc, bf); 3006 ath_tx_calc_duration(sc, bf); 3007 ath_tx_calc_protection(sc, bf); 3008 ath_tx_set_rtscts(sc, bf); 3009 ath_tx_rate_fill_rcflags(sc, bf); 3010 ath_tx_setds(sc, bf); 3011 3012 /* Statistics */ 3013 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3014 3015 /* Track per-TID hardware queue depth correctly */ 3016 tid->hwq_depth++; 3017 3018 /* Add to BAW */ 3019 if (bf->bf_state.bfs_dobaw) { 3020 ath_tx_addto_baw(sc, an, tid, bf); 3021 bf->bf_state.bfs_addedbaw = 1; 3022 } 3023 3024 /* Set completion handler, multi-frame aggregate or not */ 3025 bf->bf_comp = ath_tx_aggr_comp; 3026 3027 /* 3028 * Update the current leak count if 3029 * we're leaking frames; and set the 3030 * MORE flag as appropriate. 3031 */ 3032 ath_tx_leak_count_update(sc, tid, bf); 3033 3034 /* Hand off to hardware */ 3035 ath_tx_handoff(sc, txq, bf); 3036 } 3037 3038 /* 3039 * Attempt to send the packet. 3040 * If the queue isn't busy, direct-dispatch. 3041 * If the queue is busy enough, queue the given packet on the 3042 * relevant software queue. 3043 */ 3044 void 3045 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3046 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3047 { 3048 struct ath_node *an = ATH_NODE(ni); 3049 struct ieee80211_frame *wh; 3050 struct ath_tid *atid; 3051 int pri, tid; 3052 struct mbuf *m0 = bf->bf_m; 3053 3054 ATH_TX_LOCK_ASSERT(sc); 3055 3056 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3057 wh = mtod(m0, struct ieee80211_frame *); 3058 pri = ath_tx_getac(sc, m0); 3059 tid = ath_tx_gettid(sc, m0); 3060 atid = &an->an_tid[tid]; 3061 3062 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3063 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3064 3065 /* Set local packet state, used to queue packets to hardware */ 3066 /* XXX potentially duplicate info, re-check */ 3067 bf->bf_state.bfs_tid = tid; 3068 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3069 bf->bf_state.bfs_pri = pri; 3070 3071 /* 3072 * If the hardware queue isn't busy, queue it directly. 3073 * If the hardware queue is busy, queue it. 3074 * If the TID is paused or the traffic it outside BAW, software 3075 * queue it. 3076 * 3077 * If the node is in power-save and we're leaking a frame, 3078 * leak a single frame. 3079 */ 3080 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3081 /* TID is paused, queue */ 3082 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3083 /* 3084 * If the caller requested that it be sent at a high 3085 * priority, queue it at the head of the list. 
3086 		 */
3087 		if (queue_to_head)
3088 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3089 		else
3090 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3091 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3092 		/* AMPDU pending; queue */
3093 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3094 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3095 		/* XXX sched? */
3096 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3097 		/* AMPDU running, attempt direct dispatch if possible */
3098 
3099 		/*
3100 		 * Always queue the frame to the tail of the list.
3101 		 */
3102 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3103 
3104 		/*
3105 		 * If the hardware queue isn't busy, direct dispatch
3106 		 * the head frame in the list.  Don't schedule the
3107 		 * TID - let it build some more frames first?
3108 		 *
3109 		 * When running A-MPDU, always just check the hardware
3110 		 * queue depth against the aggregate frame limit.
3111 		 * We don't want to burst a large number of single frames
3112 		 * out to the hardware; we want to aggressively hold back.
3113 		 *
3114 		 * Otherwise, schedule the TID.
3115 		 */
3116 		/* XXX TXQ locking */
3117 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3118 			bf = ATH_TID_FIRST(atid);
3119 			ATH_TID_REMOVE(atid, bf, bf_list);
3120 
3121 			/*
3122 			 * Ensure it's definitely treated as a non-AMPDU
3123 			 * frame - this information may have been left
3124 			 * over from a previous attempt.
3125 			 */
3126 			bf->bf_state.bfs_aggr = 0;
3127 			bf->bf_state.bfs_nframes = 1;
3128 
3129 			/* Queue to the hardware */
3130 			ath_tx_xmit_aggr(sc, an, txq, bf);
3131 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3132 			    "%s: xmit_aggr\n",
3133 			    __func__);
3134 		} else {
3135 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3136 			    "%s: ampdu; swq'ing\n",
3137 			    __func__);
3138 
3139 			ath_tx_tid_sched(sc, atid);
3140 		}
3141 	/*
3142 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3143 	 * up to both limits if possible.  This particular corner
3144 	 * case may end up with packet starvation between aggregate
3145 	 * traffic and non-aggregate traffic: we want to ensure
3146 	 * that non-aggregate stations get a few frames queued to the
3147 	 * hardware before the aggregate station(s) get their chance.
3148 	 *
3149 	 * So if you only ever see a couple of frames direct dispatched
3150 	 * to the hardware from a non-AMPDU client, check both here
3151 	 * and in the software queue dispatcher to ensure that those
3152 	 * non-AMPDU stations get a fair chance to transmit.
3153 	 */
3154 	/* XXX TXQ locking */
3155 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3156 	    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3157 		/* AMPDU not running, attempt direct dispatch */
3158 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3159 		/* See if clrdmask needs to be set */
3160 		ath_tx_update_clrdmask(sc, atid, bf);
3161 
3162 		/*
3163 		 * Update the current leak count if
3164 		 * we're leaking frames; and set the
3165 		 * MORE flag as appropriate.
3166 		 */
3167 		ath_tx_leak_count_update(sc, atid, bf);
3168 
3169 		/*
3170 		 * Dispatch the frame.
3171 		 */
3172 		ath_tx_xmit_normal(sc, txq, bf);
3173 	} else {
3174 		/* Busy; queue */
3175 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3176 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3177 		ath_tx_tid_sched(sc, atid);
3178 	}
3179 }
3180 
3181 /*
3182  * Only set the clrdmask bit if none of the nodes are currently
3183  * filtered.
3184  *
3185  * XXX TODO: go through all the callers and check to see
3186  * which are being called in the context of looping over all
3187  * TIDs (eg, if all tids are being paused, resumed, etc.)
3188 * That'll avoid O(n^2) complexity here. 3189 */ 3190 static void 3191 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3192 { 3193 int i; 3194 3195 ATH_TX_LOCK_ASSERT(sc); 3196 3197 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3198 if (an->an_tid[i].isfiltered == 1) 3199 return; 3200 } 3201 an->clrdmask = 1; 3202 } 3203 3204 /* 3205 * Configure the per-TID node state. 3206 * 3207 * This likely belongs in if_ath_node.c but I can't think of anywhere 3208 * else to put it just yet. 3209 * 3210 * This sets up the SLISTs and the mutex as appropriate. 3211 */ 3212 void 3213 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3214 { 3215 int i, j; 3216 struct ath_tid *atid; 3217 3218 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3219 atid = &an->an_tid[i]; 3220 3221 /* XXX now with this bzer(), is the field 0'ing needed? */ 3222 bzero(atid, sizeof(*atid)); 3223 3224 TAILQ_INIT(&atid->tid_q); 3225 TAILQ_INIT(&atid->filtq.tid_q); 3226 atid->tid = i; 3227 atid->an = an; 3228 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3229 atid->tx_buf[j] = NULL; 3230 atid->baw_head = atid->baw_tail = 0; 3231 atid->paused = 0; 3232 atid->sched = 0; 3233 atid->hwq_depth = 0; 3234 atid->cleanup_inprogress = 0; 3235 if (i == IEEE80211_NONQOS_TID) 3236 atid->ac = ATH_NONQOS_TID_AC; 3237 else 3238 atid->ac = TID_TO_WME_AC(i); 3239 } 3240 an->clrdmask = 1; /* Always start by setting this bit */ 3241 } 3242 3243 /* 3244 * Pause the current TID. This stops packets from being transmitted 3245 * on it. 3246 * 3247 * Since this is also called from upper layers as well as the driver, 3248 * it will get the TID lock. 3249 */ 3250 static void 3251 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3252 { 3253 3254 ATH_TX_LOCK_ASSERT(sc); 3255 tid->paused++; 3256 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", 3257 __func__, 3258 tid->an->an_node.ni_macaddr, ":", 3259 tid->tid, 3260 tid->paused); 3261 } 3262 3263 /* 3264 * Unpause the current TID, and schedule it if needed. 3265 */ 3266 static void 3267 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3268 { 3269 ATH_TX_LOCK_ASSERT(sc); 3270 3271 /* 3272 * There's some odd places where ath_tx_tid_resume() is called 3273 * when it shouldn't be; this works around that particular issue 3274 * until it's actually resolved. 3275 */ 3276 if (tid->paused == 0) { 3277 device_printf(sc->sc_dev, 3278 "%s: [%6D]: tid=%d, paused=0?\n", 3279 __func__, 3280 tid->an->an_node.ni_macaddr, ":", 3281 tid->tid); 3282 } else { 3283 tid->paused--; 3284 } 3285 3286 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3287 "%s: [%6D]: tid=%d, unpaused = %d\n", 3288 __func__, 3289 tid->an->an_node.ni_macaddr, ":", 3290 tid->tid, 3291 tid->paused); 3292 3293 if (tid->paused) 3294 return; 3295 3296 /* 3297 * Override the clrdmask configuration for the next frame 3298 * from this TID, just to get the ball rolling. 3299 */ 3300 ath_tx_set_clrdmask(sc, tid->an); 3301 3302 if (tid->axq_depth == 0) 3303 return; 3304 3305 /* XXX isfiltered shouldn't ever be 0 at this point */ 3306 if (tid->isfiltered == 1) { 3307 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3308 __func__); 3309 return; 3310 } 3311 3312 ath_tx_tid_sched(sc, tid); 3313 3314 /* 3315 * Queue the software TX scheduler. 3316 */ 3317 ath_tx_swq_kick(sc); 3318 } 3319 3320 /* 3321 * Add the given ath_buf to the TID filtered frame list. 3322 * This requires the TID be filtered. 
3323 */ 3324 static void 3325 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3326 struct ath_buf *bf) 3327 { 3328 3329 ATH_TX_LOCK_ASSERT(sc); 3330 3331 if (!tid->isfiltered) 3332 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3333 __func__); 3334 3335 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3336 3337 /* Set the retry bit and bump the retry counter */ 3338 ath_tx_set_retry(sc, bf); 3339 sc->sc_stats.ast_tx_swfiltered++; 3340 3341 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3342 } 3343 3344 /* 3345 * Handle a completed filtered frame from the given TID. 3346 * This just enables/pauses the filtered frame state if required 3347 * and appends the filtered frame to the filtered queue. 3348 */ 3349 static void 3350 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3351 struct ath_buf *bf) 3352 { 3353 3354 ATH_TX_LOCK_ASSERT(sc); 3355 3356 if (! tid->isfiltered) { 3357 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3358 __func__, tid->tid); 3359 tid->isfiltered = 1; 3360 ath_tx_tid_pause(sc, tid); 3361 } 3362 3363 /* Add the frame to the filter queue */ 3364 ath_tx_tid_filt_addbuf(sc, tid, bf); 3365 } 3366 3367 /* 3368 * Complete the filtered frame TX completion. 3369 * 3370 * If there are no more frames in the hardware queue, unpause/unfilter 3371 * the TID if applicable. Otherwise we will wait for a node PS transition 3372 * to unfilter. 3373 */ 3374 static void 3375 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3376 { 3377 struct ath_buf *bf; 3378 int do_resume = 0; 3379 3380 ATH_TX_LOCK_ASSERT(sc); 3381 3382 if (tid->hwq_depth != 0) 3383 return; 3384 3385 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3386 __func__, tid->tid); 3387 if (tid->isfiltered == 1) { 3388 tid->isfiltered = 0; 3389 do_resume = 1; 3390 } 3391 3392 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3393 ath_tx_set_clrdmask(sc, tid->an); 3394 3395 /* XXX this is really quite inefficient */ 3396 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3397 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3398 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3399 } 3400 3401 /* And only resume if we had paused before */ 3402 if (do_resume) 3403 ath_tx_tid_resume(sc, tid); 3404 } 3405 3406 /* 3407 * Called when a single (aggregate or otherwise) frame is completed. 3408 * 3409 * Returns 0 if the buffer could be added to the filtered list 3410 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3411 * filtered list (failed clone; expired retry) and the caller should 3412 * free it and handle it like a failure (eg by sending a BAR.) 3413 * 3414 * since the buffer may be cloned, bf must be not touched after this 3415 * if the return value is 0. 3416 */ 3417 static int 3418 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3419 struct ath_buf *bf) 3420 { 3421 struct ath_buf *nbf; 3422 int retval; 3423 3424 ATH_TX_LOCK_ASSERT(sc); 3425 3426 /* 3427 * Don't allow a filtered frame to live forever. 3428 */ 3429 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3430 sc->sc_stats.ast_tx_swretrymax++; 3431 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3432 "%s: bf=%p, seqno=%d, exceeded retries\n", 3433 __func__, 3434 bf, 3435 SEQNO(bf->bf_state.bfs_seqno)); 3436 retval = 1; /* error */ 3437 goto finish; 3438 } 3439 3440 /* 3441 * A busy buffer can't be added to the retry list. 3442 * It needs to be cloned. 
3443 */ 3444 if (bf->bf_flags & ATH_BUF_BUSY) { 3445 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3446 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3447 "%s: busy buffer clone: %p -> %p\n", 3448 __func__, bf, nbf); 3449 } else { 3450 nbf = bf; 3451 } 3452 3453 if (nbf == NULL) { 3454 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3455 "%s: busy buffer couldn't be cloned (%p)!\n", 3456 __func__, bf); 3457 retval = 1; /* error */ 3458 } else { 3459 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3460 retval = 0; /* ok */ 3461 } 3462 finish: 3463 ath_tx_tid_filt_comp_complete(sc, tid); 3464 3465 return (retval); 3466 } 3467 3468 static void 3469 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3470 struct ath_buf *bf_first, ath_bufhead *bf_q) 3471 { 3472 struct ath_buf *bf, *bf_next, *nbf; 3473 3474 ATH_TX_LOCK_ASSERT(sc); 3475 3476 bf = bf_first; 3477 while (bf) { 3478 bf_next = bf->bf_next; 3479 bf->bf_next = NULL; /* Remove it from the aggr list */ 3480 3481 /* 3482 * Don't allow a filtered frame to live forever. 3483 */ 3484 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3485 sc->sc_stats.ast_tx_swretrymax++; 3486 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3487 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3488 __func__, 3489 tid->tid, 3490 bf, 3491 SEQNO(bf->bf_state.bfs_seqno)); 3492 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3493 goto next; 3494 } 3495 3496 if (bf->bf_flags & ATH_BUF_BUSY) { 3497 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3498 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3499 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3500 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3501 } else { 3502 nbf = bf; 3503 } 3504 3505 /* 3506 * If the buffer couldn't be cloned, add it to bf_q; 3507 * the caller will free the buffer(s) as required. 3508 */ 3509 if (nbf == NULL) { 3510 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3511 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3512 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3513 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3514 } else { 3515 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3516 } 3517 next: 3518 bf = bf_next; 3519 } 3520 3521 ath_tx_tid_filt_comp_complete(sc, tid); 3522 } 3523 3524 /* 3525 * Suspend the queue because we need to TX a BAR. 3526 */ 3527 static void 3528 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3529 { 3530 3531 ATH_TX_LOCK_ASSERT(sc); 3532 3533 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3534 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3535 __func__, 3536 tid->tid, 3537 tid->bar_wait, 3538 tid->bar_tx); 3539 3540 /* We shouldn't be called when bar_tx is 1 */ 3541 if (tid->bar_tx) { 3542 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3543 "%s: bar_tx is 1?!\n", __func__); 3544 } 3545 3546 /* If we've already been called, just be patient. */ 3547 if (tid->bar_wait) 3548 return; 3549 3550 /* Wait! */ 3551 tid->bar_wait = 1; 3552 3553 /* Only one pause, no matter how many frames fail */ 3554 ath_tx_tid_pause(sc, tid); 3555 } 3556 3557 /* 3558 * We've finished with BAR handling - either we succeeded or 3559 * failed. Either way, unsuspend TX. 
3560 */ 3561 static void 3562 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3563 { 3564 3565 ATH_TX_LOCK_ASSERT(sc); 3566 3567 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3568 "%s: %6D: TID=%d, called\n", 3569 __func__, 3570 tid->an->an_node.ni_macaddr, 3571 ":", 3572 tid->tid); 3573 3574 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3575 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3576 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3577 __func__, tid->an->an_node.ni_macaddr, ":", 3578 tid->tid, tid->bar_tx, tid->bar_wait); 3579 } 3580 3581 tid->bar_tx = tid->bar_wait = 0; 3582 ath_tx_tid_resume(sc, tid); 3583 } 3584 3585 /* 3586 * Return whether we're ready to TX a BAR frame. 3587 * 3588 * Requires the TID lock be held. 3589 */ 3590 static int 3591 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3592 { 3593 3594 ATH_TX_LOCK_ASSERT(sc); 3595 3596 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3597 return (0); 3598 3599 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3600 "%s: %6D: TID=%d, bar ready\n", 3601 __func__, 3602 tid->an->an_node.ni_macaddr, 3603 ":", 3604 tid->tid); 3605 3606 return (1); 3607 } 3608 3609 /* 3610 * Check whether the current TID is ready to have a BAR 3611 * TXed and if so, do the TX. 3612 * 3613 * Since the TID/TXQ lock can't be held during a call to 3614 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3615 * sending the BAR and locking it again. 3616 * 3617 * Eventually, the code to send the BAR should be broken out 3618 * from this routine so the lock doesn't have to be reacquired 3619 * just to be immediately dropped by the caller. 3620 */ 3621 static void 3622 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3623 { 3624 struct ieee80211_tx_ampdu *tap; 3625 3626 ATH_TX_LOCK_ASSERT(sc); 3627 3628 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3629 "%s: %6D: TID=%d, called\n", 3630 __func__, 3631 tid->an->an_node.ni_macaddr, 3632 ":", 3633 tid->tid); 3634 3635 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3636 3637 /* 3638 * This is an error condition! 3639 */ 3640 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3641 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3642 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3643 __func__, tid->an->an_node.ni_macaddr, ":", 3644 tid->tid, tid->bar_tx, tid->bar_wait); 3645 return; 3646 } 3647 3648 /* Don't do anything if we still have pending frames */ 3649 if (tid->hwq_depth > 0) { 3650 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3651 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3652 __func__, 3653 tid->an->an_node.ni_macaddr, 3654 ":", 3655 tid->tid, 3656 tid->hwq_depth); 3657 return; 3658 } 3659 3660 /* We're now about to TX */ 3661 tid->bar_tx = 1; 3662 3663 /* 3664 * Override the clrdmask configuration for the next frame, 3665 * just to get the ball rolling. 3666 */ 3667 ath_tx_set_clrdmask(sc, tid->an); 3668 3669 /* 3670 * Calculate new BAW left edge, now that all frames have either 3671 * succeeded or failed. 3672 * 3673 * XXX verify this is _actually_ the valid value to begin at! 3674 */ 3675 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3676 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3677 __func__, 3678 tid->an->an_node.ni_macaddr, 3679 ":", 3680 tid->tid, 3681 tap->txa_start); 3682 3683 /* Try sending the BAR frame */ 3684 /* We can't hold the lock here! */ 3685 3686 ATH_TX_UNLOCK(sc); 3687 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3688 /* Success? Now we wait for notification that it's done */ 3689 ATH_TX_LOCK(sc); 3690 return; 3691 } 3692 3693 /* Failure? 
For now, warn loudly and continue */
3694 ATH_TX_LOCK(sc);
3695 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3696 "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3697 __func__, tid->an->an_node.ni_macaddr, ":",
3698 tid->tid);
3699 ath_tx_tid_bar_unsuspend(sc, tid);
3700 }
3701
3702 static void
3703 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3704 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3705 {
3706
3707 ATH_TX_LOCK_ASSERT(sc);
3708
3709 /*
3710 * If the current TID is running AMPDU, update
3711 * the BAW.
3712 */
3713 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3714 bf->bf_state.bfs_dobaw) {
3715 /*
3716 * Only remove the frame from the BAW if it's
3717 * been transmitted at least once; this means
3718 * the frame was in the BAW to begin with.
3719 */
3720 if (bf->bf_state.bfs_retries > 0) {
3721 ath_tx_update_baw(sc, an, tid, bf);
3722 bf->bf_state.bfs_dobaw = 0;
3723 }
3724 #if 0
3725 /*
3726 * This has become a non-fatal error now
3727 */
3728 if (! bf->bf_state.bfs_addedbaw)
3729 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3730 "%s: wasn't added: seqno %d\n",
3731 __func__, SEQNO(bf->bf_state.bfs_seqno));
3732 #endif
3733 }
3734
3735 /* Strip it out of an aggregate list if it was in one */
3736 bf->bf_next = NULL;
3737
3738 /* Insert on the free queue to be freed by the caller */
3739 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3740 }
3741
3742 static void
3743 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3744 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3745 {
3746 struct ieee80211_node *ni = &an->an_node;
3747 struct ath_txq *txq;
3748 struct ieee80211_tx_ampdu *tap;
3749
3750 txq = sc->sc_ac2q[tid->ac];
3751 tap = ath_tx_get_tx_tid(an, tid->tid);
3752
3753 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3754 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3755 "seqno=%d, retry=%d\n",
3756 __func__,
3757 pfx,
3758 ni->ni_macaddr,
3759 ":",
3760 bf,
3761 bf->bf_state.bfs_addedbaw,
3762 bf->bf_state.bfs_dobaw,
3763 SEQNO(bf->bf_state.bfs_seqno),
3764 bf->bf_state.bfs_retries);
3765 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3766 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3767 __func__,
3768 pfx,
3769 ni->ni_macaddr,
3770 ":",
3771 bf,
3772 txq->axq_qnum,
3773 txq->axq_depth,
3774 txq->axq_aggr_depth);
3775 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3776 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3777 "isfiltered=%d\n",
3778 __func__,
3779 pfx,
3780 ni->ni_macaddr,
3781 ":",
3782 bf,
3783 tid->axq_depth,
3784 tid->hwq_depth,
3785 tid->bar_wait,
3786 tid->isfiltered);
3787 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3788 "%s: %s: %6D: tid %d: "
3789 "sched=%d, paused=%d, "
3790 "incomp=%d, baw_head=%d, "
3791 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3792 __func__,
3793 pfx,
3794 ni->ni_macaddr,
3795 ":",
3796 tid->tid,
3797 tid->sched, tid->paused,
3798 tid->incomp, tid->baw_head,
3799 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3800 ni->ni_txseqs[tid->tid]);
3801
3802 /* XXX Dump the frame, see what it is? */
3803 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3804 ieee80211_dump_pkt(ni->ni_ic,
3805 mtod(bf->bf_m, const uint8_t *),
3806 bf->bf_m->m_len, 0, -1);
3807 }
3808
3809 /*
3810 * Free any packets currently pending in the software TX queue.
3811 *
3812 * This will be called when a node is being deleted.
3813 *
3814 * It can also be called on an active node during an interface
3815 * reset or state transition.
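 *
 * (The node-deletion path is ath_tx_node_flush() and the reset/drain
 * path is ath_tx_txq_drain(), both below; each hands the freed
 * buffers back through ath_tx_default_comp().)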
3816 *
3817 * (From Linux/reference):
3818 *
3819 * TODO: For frame(s) that are in the retry state, we will reuse the
3820 * sequence number(s) without setting the retry bit. The
3821 * alternative is to give up on these and BAR the receiver's window
3822 * forward.
3823 */
3824 static void
3825 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3826 struct ath_tid *tid, ath_bufhead *bf_cq)
3827 {
3828 struct ath_buf *bf;
3829 struct ieee80211_tx_ampdu *tap;
3830 struct ieee80211_node *ni = &an->an_node;
3831 int t;
3832
3833 tap = ath_tx_get_tx_tid(an, tid->tid);
3834
3835 ATH_TX_LOCK_ASSERT(sc);
3836
3837 /* Walk the queue, free frames */
3838 t = 0;
3839 for (;;) {
3840 bf = ATH_TID_FIRST(tid);
3841 if (bf == NULL) {
3842 break;
3843 }
3844
3845 if (t == 0) {
3846 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3847 // t = 1;
3848 }
3849
3850 ATH_TID_REMOVE(tid, bf, bf_list);
3851 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3852 }
3853
3854 /* And now, drain the filtered frame queue */
3855 t = 0;
3856 for (;;) {
3857 bf = ATH_TID_FILT_FIRST(tid);
3858 if (bf == NULL)
3859 break;
3860
3861 if (t == 0) {
3862 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3863 // t = 1;
3864 }
3865
3866 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3867 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3868 }
3869
3870 /*
3871 * Override the clrdmask configuration for the next frame
3872 * in case there is some future transmission, just to get
3873 * the ball rolling.
3874 *
3875 * This won't hurt things if the TID is about to be freed.
3876 */
3877 ath_tx_set_clrdmask(sc, tid->an);
3878
3879 /*
3880 * Now that it's completed, grab the TID lock and update
3881 * the sequence number and BAW window.
3882 * Because sequence numbers have been assigned to frames
3883 * that haven't been sent yet, it's entirely possible
3884 * we'll be called with some pending frames that have not
3885 * been transmitted.
3886 *
3887 * The cleaner solution is to do the sequence number allocation
3888 * when the packet is first transmitted - and thus the "retries"
3889 * check above would be enough to update the BAW/seqno.
3890 */
3891
3892 /* But don't do it for non-QoS TIDs */
3893 if (tap) {
3894 #if 1
3895 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3896 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3897 __func__,
3898 ni->ni_macaddr,
3899 ":",
3900 an,
3901 tid->tid,
3902 tap->txa_start);
3903 #endif
3904 ni->ni_txseqs[tid->tid] = tap->txa_start;
3905 tid->baw_tail = tid->baw_head;
3906 }
3907 }
3908
3909 /*
3910 * Reset the TID state. This must only be called once the node has
3911 * had its frames flushed from this TID, to ensure that no other
3912 * pause / unpause logic can kick in.
3913 */
3914 static void
3915 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3916 {
3917
3918 #if 0
3919 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3920 tid->paused = tid->sched = tid->addba_tx_pending = 0;
3921 tid->incomp = tid->cleanup_inprogress = 0;
3922 #endif
3923
3924 /*
3925 * If we have a bar_wait set, we need to unpause the TID
3926 * here. Otherwise once cleanup has finished, the TID won't
3927 * have the right paused counter.
3928 *
3929 * XXX I'm not going through resume here - I don't want the
3930 * node to be rescheduled just yet. This, however, should be
3931 * methodized!
3932 */
3933 if (tid->bar_wait) {
3934 if (tid->paused > 0) {
3935 tid->paused --;
3936 }
3937 }
3938
3939 /*
3940 * XXX same with a currently filtered TID.
3941 *
3942 * Since this is being called during a flush, we assume that
3943 * the filtered frame list is actually empty.
3944 *
3945 * XXX TODO: add in a check to ensure that the filtered queue
3946 * depth is actually 0!
3947 */
3948 if (tid->isfiltered) {
3949 if (tid->paused > 0) {
3950 tid->paused --;
3951 }
3952 }
3953
3954 /*
3955 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3956 * The TID may be going through cleanup from the last association
3957 * where things in the BAW are still in the hardware queue.
3958 */
3959 tid->bar_wait = 0;
3960 tid->bar_tx = 0;
3961 tid->isfiltered = 0;
3962 tid->sched = 0;
3963 tid->addba_tx_pending = 0;
3964
3965 /*
3966 * XXX TODO: it may just be enough to walk the HWQs and mark
3967 * frames for that node as non-aggregate; or mark the ath_node
3968 * with something that indicates that aggregation is no longer
3969 * occurring. Then we can just toss the BAW complaints and
3970 * do a complete hard reset of state here - no pause, no
3971 * complete counter, etc.
3972 */
3973
3974 }
3975
3976 /*
3977 * Flush all software queued packets for the given node.
3978 *
3979 * This occurs when a completion handler frees the last buffer
3980 * for a node, and the node is thus freed. This causes the node
3981 * to be cleaned up, which ends up calling ath_tx_node_flush.
3982 */
3983 void
3984 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3985 {
3986 int tid;
3987 ath_bufhead bf_cq;
3988 struct ath_buf *bf;
3989
3990 TAILQ_INIT(&bf_cq);
3991
3992 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3993 &an->an_node);
3994
3995 ATH_TX_LOCK(sc);
3996 DPRINTF(sc, ATH_DEBUG_NODE,
3997 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3998 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3999 __func__,
4000 an->an_node.ni_macaddr,
4001 ":",
4002 an->an_is_powersave,
4003 an->an_stack_psq,
4004 an->an_tim_set,
4005 an->an_swq_depth,
4006 an->clrdmask,
4007 an->an_leak_count);
4008
4009 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4010 struct ath_tid *atid = &an->an_tid[tid];
4011
4012 /* Free packets */
4013 ath_tx_tid_drain(sc, an, atid, &bf_cq);
4014
4015 /* Remove this tid from the list of active tids */
4016 ath_tx_tid_unsched(sc, atid);
4017
4018 /* Reset the per-TID pause, BAR, etc state */
4019 ath_tx_tid_reset(sc, atid);
4020 }
4021
4022 /*
4023 * Clear global leak count
4024 */
4025 an->an_leak_count = 0;
4026 ATH_TX_UNLOCK(sc);
4027
4028 /* Handle completed frames */
4029 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4030 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4031 ath_tx_default_comp(sc, bf, 0);
4032 }
4033 }
4034
4035 /*
4036 * Drain all the software TXQs currently with traffic queued.
4037 */
4038 void
4039 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4040 {
4041 struct ath_tid *tid;
4042 ath_bufhead bf_cq;
4043 struct ath_buf *bf;
4044
4045 TAILQ_INIT(&bf_cq);
4046 ATH_TX_LOCK(sc);
4047
4048 /*
4049 * Iterate over all active tids for the given txq,
4050 * flushing and unsched'ing them
4051 */
4052 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4053 tid = TAILQ_FIRST(&txq->axq_tidq);
4054 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4055 ath_tx_tid_unsched(sc, tid);
4056 }
4057
4058 ATH_TX_UNLOCK(sc);
4059
4060 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4061 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4062 ath_tx_default_comp(sc, bf, 0);
4063 }
4064 }
4065
4066 /*
4067 * Handle completion of non-aggregate session frames.
4068 *
4069 * This (currently) doesn't implement software retransmission of
4070 * non-aggregate frames!
4071 *
4072 * Software retransmission of non-aggregate frames needs to obey
4073 * the strict sequence number ordering, and drop any frames that
4074 * will fail this.
4075 *
4076 * For now, filtered frames and frame retransmission will cause
4077 * all kinds of issues. So we don't support them.
4078 *
4079 * So anyone queuing frames via ath_tx_normal_xmit() or
4080 * ath_tx_tid_hw_queue_norm() must override and set CLRDMASK.
4081 */
4082 void
4083 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4084 {
4085 struct ieee80211_node *ni = bf->bf_node;
4086 struct ath_node *an = ATH_NODE(ni);
4087 int tid = bf->bf_state.bfs_tid;
4088 struct ath_tid *atid = &an->an_tid[tid];
4089 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4090
4091 /* The TID state is protected behind the TXQ lock */
4092 ATH_TX_LOCK(sc);
4093
4094 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4095 __func__, bf, fail, atid->hwq_depth - 1);
4096
4097 atid->hwq_depth--;
4098
4099 #if 0
4100 /*
4101 * If the frame was filtered, stick it on the filter frame
4102 * queue and complain about it. It shouldn't happen!
4103 */
4104 if ((ts->ts_status & HAL_TXERR_FILT) ||
4105 (ts->ts_status != 0 && atid->isfiltered)) {
4106 DPRINTF(sc, ATH_DEBUG_SW_TX,
4107 "%s: isfiltered=%d, ts_status=%d: huh?\n",
4108 __func__,
4109 atid->isfiltered,
4110 ts->ts_status);
4111 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4112 }
4113 #endif
4114 if (atid->isfiltered)
4115 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4116 if (atid->hwq_depth < 0)
4117 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4118 __func__, atid->hwq_depth);
4119
4120 /* If the TID is being cleaned up, track things */
4121 /* XXX refactor! */
4122 if (atid->cleanup_inprogress) {
4123 atid->incomp--;
4124 if (atid->incomp == 0) {
4125 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4126 "%s: TID %d: cleaned up! resume!\n",
4127 __func__, tid);
4128 atid->cleanup_inprogress = 0;
4129 ath_tx_tid_resume(sc, atid);
4130 }
4131 }
4132
4133 /*
4134 * If the queue is filtered, potentially mark it as complete
4135 * and reschedule it as needed.
4136 *
4137 * This is required as there may be a subsequent TX descriptor
4138 * for this end-node that has CLRDMASK set, so it's quite possible
4139 * that a filtered frame will be followed by a non-filtered
4140 * (complete or otherwise) frame.
4141 *
4142 * XXX should we do this before we complete the frame?
4143 */
4144 if (atid->isfiltered)
4145 ath_tx_tid_filt_comp_complete(sc, atid);
4146 ATH_TX_UNLOCK(sc);
4147
4148 /*
4149 * punt to rate control if we're not being cleaned up
4150 * during a hw queue drain and the frame wanted an ACK.
4151 */
4152 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4153 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4154 ts, bf->bf_state.bfs_pktlen,
4155 1, (ts->ts_status == 0) ? 0 : 1);
4156
4157 ath_tx_default_comp(sc, bf, fail);
4158 }
4159
4160 /*
4161 * Handle cleanup of aggregate session packets that aren't
4162 * an A-MPDU.
4163 *
4164 * There's no need to update the BAW here - the session is being
4165 * torn down.
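 *
 * (The cleanup path counts outstanding hardware frames in
 * atid->incomp; each completion decrements it and the TID is
 * resumed once it reaches zero.)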
4166 */ 4167 static void 4168 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4169 { 4170 struct ieee80211_node *ni = bf->bf_node; 4171 struct ath_node *an = ATH_NODE(ni); 4172 int tid = bf->bf_state.bfs_tid; 4173 struct ath_tid *atid = &an->an_tid[tid]; 4174 4175 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4176 __func__, tid, atid->incomp); 4177 4178 ATH_TX_LOCK(sc); 4179 atid->incomp--; 4180 4181 /* XXX refactor! */ 4182 if (bf->bf_state.bfs_dobaw) { 4183 ath_tx_update_baw(sc, an, atid, bf); 4184 if (!bf->bf_state.bfs_addedbaw) 4185 DPRINTF(sc, ATH_DEBUG_SW_TX, 4186 "%s: wasn't added: seqno %d\n", 4187 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4188 } 4189 4190 if (atid->incomp == 0) { 4191 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4192 "%s: TID %d: cleaned up! resume!\n", 4193 __func__, tid); 4194 atid->cleanup_inprogress = 0; 4195 ath_tx_tid_resume(sc, atid); 4196 } 4197 ATH_TX_UNLOCK(sc); 4198 4199 ath_tx_default_comp(sc, bf, 0); 4200 } 4201 4202 4203 /* 4204 * This as it currently stands is a bit dumb. Ideally we'd just 4205 * fail the frame the normal way and have it permanently fail 4206 * via the normal aggregate completion path. 4207 */ 4208 static void 4209 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4210 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4211 { 4212 struct ath_tid *atid = &an->an_tid[tid]; 4213 struct ath_buf *bf, *bf_next; 4214 4215 ATH_TX_LOCK_ASSERT(sc); 4216 4217 /* 4218 * Remove this frame from the queue. 4219 */ 4220 ATH_TID_REMOVE(atid, bf_head, bf_list); 4221 4222 /* 4223 * Loop over all the frames in the aggregate. 4224 */ 4225 bf = bf_head; 4226 while (bf != NULL) { 4227 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4228 4229 /* 4230 * If it's been added to the BAW we need to kick 4231 * it out of the BAW before we continue. 4232 * 4233 * XXX if it's an aggregate, assert that it's in the 4234 * BAW - we shouldn't have it be in an aggregate 4235 * otherwise! 4236 */ 4237 if (bf->bf_state.bfs_addedbaw) { 4238 ath_tx_update_baw(sc, an, atid, bf); 4239 bf->bf_state.bfs_dobaw = 0; 4240 } 4241 4242 /* 4243 * Give it the default completion handler. 4244 */ 4245 bf->bf_comp = ath_tx_normal_comp; 4246 bf->bf_next = NULL; 4247 4248 /* 4249 * Add it to the list to free. 4250 */ 4251 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4252 4253 /* 4254 * Now advance to the next frame in the aggregate. 4255 */ 4256 bf = bf_next; 4257 } 4258 } 4259 4260 /* 4261 * Performs transmit side cleanup when TID changes from aggregated to 4262 * unaggregated and during reassociation. 4263 * 4264 * For now, this just tosses everything from the TID software queue 4265 * whether or not it has been retried and marks the TID as 4266 * pending completion if there's anything for this TID queued to 4267 * the hardware. 4268 * 4269 * The caller is responsible for pausing the TID and unpausing the 4270 * TID if no cleanup was required. Otherwise the cleanup path will 4271 * unpause the TID once the last hardware queued frame is completed. 4272 */ 4273 static void 4274 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4275 ath_bufhead *bf_cq) 4276 { 4277 struct ath_tid *atid = &an->an_tid[tid]; 4278 struct ath_buf *bf, *bf_next; 4279 4280 ATH_TX_LOCK_ASSERT(sc); 4281 4282 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4283 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4284 atid->cleanup_inprogress); 4285 4286 /* 4287 * Move the filtered frames to the TX queue, before 4288 * we run off and discard/process things. 
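 *
 * (The loop below pulls frames from the tail of the filtered queue
 * and inserts each at the head of the software queue, which
 * preserves the original frame ordering.)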
4289 */
4290
4291 /* XXX this is really quite inefficient */
4292 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4293 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4294 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4295 }
4296
4297 /*
4298 * Update the frames in the software TX queue:
4299 *
4300 * + Discard retry frames in the queue
4301 * + Fix the completion function to be non-aggregate
4302 */
4303 bf = ATH_TID_FIRST(atid);
4304 while (bf) {
4305 /*
4306 * Grab the next frame in the list; we may
4307 * be fiddling with the list.
4308 */
4309 bf_next = TAILQ_NEXT(bf, bf_list);
4310
4311 /*
4312 * Free the frame and all subframes.
4313 */
4314 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4315
4316 /*
4317 * Next frame!
4318 */
4319 bf = bf_next;
4320 }
4321
4322 /*
4323 * If there's anything in the hardware queue, we wait
4324 * for the TID HWQ to empty.
4325 */
4326 if (atid->hwq_depth > 0) {
4327 /*
4328 * XXX how about we kill atid->incomp, and instead
4329 * replace it with a macro that checks that atid->hwq_depth
4330 * is 0?
4331 */
4332 atid->incomp = atid->hwq_depth;
4333 atid->cleanup_inprogress = 1;
4334 }
4335
4336 if (atid->cleanup_inprogress)
4337 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4338 "%s: TID %d: cleanup needed: %d packets\n",
4339 __func__, tid, atid->incomp);
4340
4341 /* Owner now must free completed frames */
4342 }
4343
4344 static struct ath_buf *
4345 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4346 struct ath_tid *tid, struct ath_buf *bf)
4347 {
4348 struct ath_buf *nbf;
4349 int error;
4350
4351 /*
4352 * Clone the buffer. This will handle the dma unmap and
4353 * copy the node reference to the new buffer. If this
4354 * works out, 'bf' will have no DMA mapping, no mbuf
4355 * pointer and no node reference.
4356 */
4357 nbf = ath_buf_clone(sc, bf);
4358
4359 #if 0
4360 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4361 __func__);
4362 #endif
4363
4364 if (nbf == NULL) {
4365 /* Failed to clone */
4366 DPRINTF(sc, ATH_DEBUG_XMIT,
4367 "%s: failed to clone a busy buffer\n",
4368 __func__);
4369 return NULL;
4370 }
4371
4372 /* Setup the dma for the new buffer */
4373 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4374 if (error != 0) {
4375 DPRINTF(sc, ATH_DEBUG_XMIT,
4376 "%s: failed to setup dma for clone\n",
4377 __func__);
4378 /*
4379 * Put this at the head of the list, not tail;
4380 * that way it doesn't interfere with the
4381 * busy buffer logic (which uses the tail of
4382 * the list.)
4383 */
4384 ATH_TXBUF_LOCK(sc);
4385 ath_returnbuf_head(sc, nbf);
4386 ATH_TXBUF_UNLOCK(sc);
4387 return NULL;
4388 }
4389
4390 /* Update BAW if required, before we free the original buf */
4391 if (bf->bf_state.bfs_dobaw)
4392 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4393
4394 /* Free original buffer; return new buffer */
4395 ath_freebuf(sc, bf);
4396
4397 return nbf;
4398 }
4399
4400 /*
4401 * Handle retrying an unaggregate frame in an aggregate
4402 * session.
4403 *
4404 * If too many retries occur, pause the TID, wait for
4405 * any further retransmits (as there's no requirement that
4406 * non-aggregate frames in an aggregate session be
4407 * transmitted in-order; they just have to be in-BAW)
4408 * and then queue a BAR.
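 *
 * In outline, the retry path below does the following:
 *
 *	- busy buffer: clone it, or force bfs_retries past SWMAX_RETRIES
 *	  if the clone fails;
 *	- retries exhausted: update the BAW, suspend the TID for a BAR,
 *	  then complete the frame;
 *	- otherwise: mark the retry and requeue the frame at the head of
 *	  the TID software queue.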
4409 */ 4410 static void 4411 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4412 { 4413 struct ieee80211_node *ni = bf->bf_node; 4414 struct ath_node *an = ATH_NODE(ni); 4415 int tid = bf->bf_state.bfs_tid; 4416 struct ath_tid *atid = &an->an_tid[tid]; 4417 struct ieee80211_tx_ampdu *tap; 4418 4419 ATH_TX_LOCK(sc); 4420 4421 tap = ath_tx_get_tx_tid(an, tid); 4422 4423 /* 4424 * If the buffer is marked as busy, we can't directly 4425 * reuse it. Instead, try to clone the buffer. 4426 * If the clone is successful, recycle the old buffer. 4427 * If the clone is unsuccessful, set bfs_retries to max 4428 * to force the next bit of code to free the buffer 4429 * for us. 4430 */ 4431 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4432 (bf->bf_flags & ATH_BUF_BUSY)) { 4433 struct ath_buf *nbf; 4434 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4435 if (nbf) 4436 /* bf has been freed at this point */ 4437 bf = nbf; 4438 else 4439 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4440 } 4441 4442 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4443 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4444 "%s: exceeded retries; seqno %d\n", 4445 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4446 sc->sc_stats.ast_tx_swretrymax++; 4447 4448 /* Update BAW anyway */ 4449 if (bf->bf_state.bfs_dobaw) { 4450 ath_tx_update_baw(sc, an, atid, bf); 4451 if (! bf->bf_state.bfs_addedbaw) 4452 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4453 "%s: wasn't added: seqno %d\n", 4454 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4455 } 4456 bf->bf_state.bfs_dobaw = 0; 4457 4458 /* Suspend the TX queue and get ready to send the BAR */ 4459 ath_tx_tid_bar_suspend(sc, atid); 4460 4461 /* Send the BAR if there are no other frames waiting */ 4462 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4463 ath_tx_tid_bar_tx(sc, atid); 4464 4465 ATH_TX_UNLOCK(sc); 4466 4467 /* Free buffer, bf is free after this call */ 4468 ath_tx_default_comp(sc, bf, 0); 4469 return; 4470 } 4471 4472 /* 4473 * This increments the retry counter as well as 4474 * sets the retry flag in the ath_buf and packet 4475 * body. 4476 */ 4477 ath_tx_set_retry(sc, bf); 4478 sc->sc_stats.ast_tx_swretries++; 4479 4480 /* 4481 * Insert this at the head of the queue, so it's 4482 * retried before any current/subsequent frames. 4483 */ 4484 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4485 ath_tx_tid_sched(sc, atid); 4486 /* Send the BAR if there are no other frames waiting */ 4487 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4488 ath_tx_tid_bar_tx(sc, atid); 4489 4490 ATH_TX_UNLOCK(sc); 4491 } 4492 4493 /* 4494 * Common code for aggregate excessive retry/subframe retry. 4495 * If retrying, queues buffers to bf_q. If not, frees the 4496 * buffers. 4497 * 4498 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4499 */ 4500 static int 4501 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4502 ath_bufhead *bf_q) 4503 { 4504 struct ieee80211_node *ni = bf->bf_node; 4505 struct ath_node *an = ATH_NODE(ni); 4506 int tid = bf->bf_state.bfs_tid; 4507 struct ath_tid *atid = &an->an_tid[tid]; 4508 4509 ATH_TX_LOCK_ASSERT(sc); 4510 4511 /* XXX clr11naggr should be done for all subframes */ 4512 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4513 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4514 4515 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4516 4517 /* 4518 * If the buffer is marked as busy, we can't directly 4519 * reuse it. Instead, try to clone the buffer. 4520 * If the clone is successful, recycle the old buffer. 
4521 * If the clone is unsuccessful, set bfs_retries to max 4522 * to force the next bit of code to free the buffer 4523 * for us. 4524 */ 4525 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4526 (bf->bf_flags & ATH_BUF_BUSY)) { 4527 struct ath_buf *nbf; 4528 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4529 if (nbf) 4530 /* bf has been freed at this point */ 4531 bf = nbf; 4532 else 4533 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4534 } 4535 4536 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4537 sc->sc_stats.ast_tx_swretrymax++; 4538 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4539 "%s: max retries: seqno %d\n", 4540 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4541 ath_tx_update_baw(sc, an, atid, bf); 4542 if (!bf->bf_state.bfs_addedbaw) 4543 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4544 "%s: wasn't added: seqno %d\n", 4545 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4546 bf->bf_state.bfs_dobaw = 0; 4547 return 1; 4548 } 4549 4550 ath_tx_set_retry(sc, bf); 4551 sc->sc_stats.ast_tx_swretries++; 4552 bf->bf_next = NULL; /* Just to make sure */ 4553 4554 /* Clear the aggregate state */ 4555 bf->bf_state.bfs_aggr = 0; 4556 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4557 bf->bf_state.bfs_nframes = 1; 4558 4559 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4560 return 0; 4561 } 4562 4563 /* 4564 * error pkt completion for an aggregate destination 4565 */ 4566 static void 4567 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4568 struct ath_tid *tid) 4569 { 4570 struct ieee80211_node *ni = bf_first->bf_node; 4571 struct ath_node *an = ATH_NODE(ni); 4572 struct ath_buf *bf_next, *bf; 4573 ath_bufhead bf_q; 4574 int drops = 0; 4575 struct ieee80211_tx_ampdu *tap; 4576 ath_bufhead bf_cq; 4577 4578 TAILQ_INIT(&bf_q); 4579 TAILQ_INIT(&bf_cq); 4580 4581 /* 4582 * Update rate control - all frames have failed. 4583 * 4584 * XXX use the length in the first frame in the series; 4585 * XXX just so things are consistent for now. 4586 */ 4587 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4588 &bf_first->bf_status.ds_txstat, 4589 bf_first->bf_state.bfs_pktlen, 4590 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4591 4592 ATH_TX_LOCK(sc); 4593 tap = ath_tx_get_tx_tid(an, tid->tid); 4594 sc->sc_stats.ast_tx_aggr_failall++; 4595 4596 /* Retry all subframes */ 4597 bf = bf_first; 4598 while (bf) { 4599 bf_next = bf->bf_next; 4600 bf->bf_next = NULL; /* Remove it from the aggr list */ 4601 sc->sc_stats.ast_tx_aggr_fail++; 4602 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4603 drops++; 4604 bf->bf_next = NULL; 4605 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4606 } 4607 bf = bf_next; 4608 } 4609 4610 /* Prepend all frames to the beginning of the queue */ 4611 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4612 TAILQ_REMOVE(&bf_q, bf, bf_list); 4613 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4614 } 4615 4616 /* 4617 * Schedule the TID to be re-tried. 4618 */ 4619 ath_tx_tid_sched(sc, tid); 4620 4621 /* 4622 * send bar if we dropped any frames 4623 * 4624 * Keep the txq lock held for now, as we need to ensure 4625 * that ni_txseqs[] is consistent (as it's being updated 4626 * in the ifnet TX context or raw TX context.) 
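 *
 * (ath_tx_tid_bar_suspend() only pauses the TID once, no matter
 * how many subframes were dropped, so calling it after a multi-frame
 * drop is safe.)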
4627 */
4628 if (drops) {
4629 /* Suspend the TX queue and get ready to send the BAR */
4630 ath_tx_tid_bar_suspend(sc, tid);
4631 }
4632
4633 /*
4634 * Send BAR if required
4635 */
4636 if (ath_tx_tid_bar_tx_ready(sc, tid))
4637 ath_tx_tid_bar_tx(sc, tid);
4638
4639 ATH_TX_UNLOCK(sc);
4640
4641 /* Complete frames which errored out */
4642 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4643 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4644 ath_tx_default_comp(sc, bf, 0);
4645 }
4646 }
4647
4648 /*
4649 * Handle clean-up of packets from an aggregate list.
4650 *
4651 * There's no need to update the BAW here - the session is being
4652 * torn down.
4653 */
4654 static void
4655 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4656 {
4657 struct ath_buf *bf, *bf_next;
4658 struct ieee80211_node *ni = bf_first->bf_node;
4659 struct ath_node *an = ATH_NODE(ni);
4660 int tid = bf_first->bf_state.bfs_tid;
4661 struct ath_tid *atid = &an->an_tid[tid];
4662
4663 ATH_TX_LOCK(sc);
4664
4665 /* update incomp */
4666 atid->incomp--;
4667
4668 /* Update the BAW */
4669 bf = bf_first;
4670 while (bf) {
4671 /* XXX refactor! */
4672 if (bf->bf_state.bfs_dobaw) {
4673 ath_tx_update_baw(sc, an, atid, bf);
4674 if (!bf->bf_state.bfs_addedbaw)
4675 DPRINTF(sc, ATH_DEBUG_SW_TX,
4676 "%s: wasn't added: seqno %d\n",
4677 __func__, SEQNO(bf->bf_state.bfs_seqno));
4678 }
4679 bf = bf->bf_next;
4680 }
4681
4682 if (atid->incomp == 0) {
4683 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4684 "%s: TID %d: cleaned up! resume!\n",
4685 __func__, tid);
4686 atid->cleanup_inprogress = 0;
4687 ath_tx_tid_resume(sc, atid);
4688 }
4689
4690 /* Send BAR if required */
4691 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4692 /*
4693 * XXX TODO: we should likely just tear down the BAR state here,
4694 * rather than sending a BAR.
4695 */
4696 if (ath_tx_tid_bar_tx_ready(sc, atid))
4697 ath_tx_tid_bar_tx(sc, atid);
4698
4699 ATH_TX_UNLOCK(sc);
4700
4701 /* Handle frame completion as individual frames */
4702 bf = bf_first;
4703 while (bf) {
4704 bf_next = bf->bf_next;
4705 bf->bf_next = NULL;
4706 ath_tx_default_comp(sc, bf, 1);
4707 bf = bf_next;
4708 }
4709 }
4710
4711 /*
4712 * Handle completion of a set of aggregate frames.
4713 *
4714 * Note: the completion handler is the last descriptor in the aggregate,
4715 * not the last descriptor in the first frame.
4716 */
4717 static void
4718 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4719 int fail)
4720 {
4721 //struct ath_desc *ds = bf->bf_lastds;
4722 struct ieee80211_node *ni = bf_first->bf_node;
4723 struct ath_node *an = ATH_NODE(ni);
4724 int tid = bf_first->bf_state.bfs_tid;
4725 struct ath_tid *atid = &an->an_tid[tid];
4726 struct ath_tx_status ts;
4727 struct ieee80211_tx_ampdu *tap;
4728 ath_bufhead bf_q;
4729 ath_bufhead bf_cq;
4730 int seq_st, tx_ok;
4731 int hasba, isaggr;
4732 uint32_t ba[2];
4733 struct ath_buf *bf, *bf_next;
4734 int ba_index;
4735 int drops = 0;
4736 int nframes = 0, nbad = 0, nf;
4737 int pktlen;
4738 /* XXX there's too much on the stack? */
4739 struct ath_rc_series rc[ATH_RC_NUM];
4740 int txseq;
4741
4742 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4743 __func__, atid->hwq_depth);
4744
4745 /*
4746 * Take a copy; this may be needed -after- bf_first
4747 * has been completed and freed.
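 *
 * (ts is a structure copy, not a pointer; it stays valid even after
 * every bf in the aggregate has been handed back to the free list.)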
4748 */
4749 ts = bf_first->bf_status.ds_txstat;
4750
4751 TAILQ_INIT(&bf_q);
4752 TAILQ_INIT(&bf_cq);
4753
4754 /* The TID state is kept behind the TXQ lock */
4755 ATH_TX_LOCK(sc);
4756
4757 atid->hwq_depth--;
4758 if (atid->hwq_depth < 0)
4759 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4760 __func__, atid->hwq_depth);
4761
4762 /*
4763 * If the TID is filtered, handle completing the filter
4764 * transition before potentially kicking it to the cleanup
4765 * function.
4766 *
4767 * XXX this is duplicate work, ew.
4768 */
4769 if (atid->isfiltered)
4770 ath_tx_tid_filt_comp_complete(sc, atid);
4771
4772 /*
4773 * Punt cleanup to the relevant function, not our problem now
4774 */
4775 if (atid->cleanup_inprogress) {
4776 if (atid->isfiltered)
4777 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4778 "%s: isfiltered=1, normal_comp?\n",
4779 __func__);
4780 ATH_TX_UNLOCK(sc);
4781 ath_tx_comp_cleanup_aggr(sc, bf_first);
4782 return;
4783 }
4784
4785 /*
4786 * If the frame is filtered, transition to filtered frame
4787 * mode and add this to the filtered frame list.
4788 *
4789 * XXX TODO: figure out how this interoperates with
4790 * BAR, pause and cleanup states.
4791 */
4792 if ((ts.ts_status & HAL_TXERR_FILT) ||
4793 (ts.ts_status != 0 && atid->isfiltered)) {
4794 if (fail != 0)
4795 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4796 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4797 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4798
4799 /* Remove from BAW */
4800 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4801 if (bf->bf_state.bfs_addedbaw)
4802 drops++;
4803 if (bf->bf_state.bfs_dobaw) {
4804 ath_tx_update_baw(sc, an, atid, bf);
4805 if (!bf->bf_state.bfs_addedbaw)
4806 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4807 "%s: wasn't added: seqno %d\n",
4808 __func__,
4809 SEQNO(bf->bf_state.bfs_seqno));
4810 }
4811 bf->bf_state.bfs_dobaw = 0;
4812 }
4813 /*
4814 * If any intermediate frames in the BAW were dropped when
4815 * handling filtering things, send a BAR.
4816 */
4817 if (drops)
4818 ath_tx_tid_bar_suspend(sc, atid);
4819
4820 /*
4821 * Finish up by sending a BAR if required and freeing
4822 * the frames outside of the TX lock.
4823 */
4824 goto finish_send_bar;
4825 }
4826
4827 /*
4828 * XXX for now, use the first frame in the aggregate for
4829 * XXX rate control completion; it's at least consistent.
4830 */
4831 pktlen = bf_first->bf_state.bfs_pktlen;
4832
4833 /*
4834 * Handle errors first!
4835 *
4836 * Here, handle _any_ error as an "exceeded retries" error.
4837 * Later on (when filtered frames are to be specially handled)
4838 * it'll have to be expanded.
4839 */
4840 #if 0
4841 if (ts.ts_status & HAL_TXERR_XRETRY) {
4842 #endif
4843 if (ts.ts_status != 0) {
4844 ATH_TX_UNLOCK(sc);
4845 ath_tx_comp_aggr_error(sc, bf_first, atid);
4846 return;
4847 }
4848
4849 tap = ath_tx_get_tx_tid(an, tid);
4850
4851 /*
4852 * extract starting sequence and block-ack bitmap
4853 */
4854 /* XXX endian-ness of seq_st, ba? */
4855 seq_st = ts.ts_seqnum;
4856 hasba = !! (ts.ts_flags & HAL_TX_BA);
4857 tx_ok = (ts.ts_status == 0);
4858 isaggr = bf_first->bf_state.bfs_aggr;
4859 ba[0] = ts.ts_ba_low;
4860 ba[1] = ts.ts_ba_high;
4861
4862 /*
4863 * Copy the TX completion status and the rate control
4864 * series from the first descriptor, as it may be freed
4865 * before the rate control code can get its grubby fingers
4866 * into things.
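 *
 * (For the walk further below: ba[0]/ba[1] together form the 64-bit
 * block-ack bitmap anchored at seq_st; ATH_BA_INDEX() gives a
 * subframe's offset from that left edge and ATH_BA_ISSET() tests
 * whether the subframe was ACKed.)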
4867 */ 4868 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4869 4870 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4871 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4872 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4873 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4874 isaggr, seq_st, hasba, ba[0], ba[1]); 4875 4876 /* 4877 * The reference driver doesn't do this; it simply ignores 4878 * this check in its entirety. 4879 * 4880 * I've seen this occur when using iperf to send traffic 4881 * out tid 1 - the aggregate frames are all marked as TID 1, 4882 * but the TXSTATUS has TID=0. So, let's just ignore this 4883 * check. 4884 */ 4885 #if 0 4886 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4887 if (tid != ts.ts_tid) { 4888 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4889 __func__, tid, ts.ts_tid); 4890 tx_ok = 0; 4891 } 4892 #endif 4893 4894 /* AR5416 BA bug; this requires an interface reset */ 4895 if (isaggr && tx_ok && (! hasba)) { 4896 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4897 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4898 "seq_st=%d\n", 4899 __func__, hasba, tx_ok, isaggr, seq_st); 4900 /* XXX TODO: schedule an interface reset */ 4901 #ifdef ATH_DEBUG 4902 ath_printtxbuf(sc, bf_first, 4903 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 4904 #endif 4905 } 4906 4907 /* 4908 * Walk the list of frames, figure out which ones were correctly 4909 * sent and which weren't. 4910 */ 4911 bf = bf_first; 4912 nf = bf_first->bf_state.bfs_nframes; 4913 4914 /* bf_first is going to be invalid once this list is walked */ 4915 bf_first = NULL; 4916 4917 /* 4918 * Walk the list of completed frames and determine 4919 * which need to be completed and which need to be 4920 * retransmitted. 4921 * 4922 * For completed frames, the completion functions need 4923 * to be called at the end of this function as the last 4924 * node reference may free the node. 4925 * 4926 * Finally, since the TXQ lock can't be held during the 4927 * completion callback (to avoid lock recursion), 4928 * the completion calls have to be done outside of the 4929 * lock. 4930 */ 4931 while (bf) { 4932 nframes++; 4933 ba_index = ATH_BA_INDEX(seq_st, 4934 SEQNO(bf->bf_state.bfs_seqno)); 4935 bf_next = bf->bf_next; 4936 bf->bf_next = NULL; /* Remove it from the aggr list */ 4937 4938 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4939 "%s: checking bf=%p seqno=%d; ack=%d\n", 4940 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 4941 ATH_BA_ISSET(ba, ba_index)); 4942 4943 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 4944 sc->sc_stats.ast_tx_aggr_ok++; 4945 ath_tx_update_baw(sc, an, atid, bf); 4946 bf->bf_state.bfs_dobaw = 0; 4947 if (!bf->bf_state.bfs_addedbaw) 4948 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4949 "%s: wasn't added: seqno %d\n", 4950 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4951 bf->bf_next = NULL; 4952 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4953 } else { 4954 sc->sc_stats.ast_tx_aggr_fail++; 4955 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4956 drops++; 4957 bf->bf_next = NULL; 4958 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4959 } 4960 nbad++; 4961 } 4962 bf = bf_next; 4963 } 4964 4965 /* 4966 * Now that the BAW updates have been done, unlock 4967 * 4968 * txseq is grabbed before the lock is released so we 4969 * have a consistent view of what -was- in the BAW. 4970 * Anything after this point will not yet have been 4971 * TXed. 
4972 */ 4973 txseq = tap->txa_start; 4974 ATH_TX_UNLOCK(sc); 4975 4976 if (nframes != nf) 4977 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4978 "%s: num frames seen=%d; bf nframes=%d\n", 4979 __func__, nframes, nf); 4980 4981 /* 4982 * Now we know how many frames were bad, call the rate 4983 * control code. 4984 */ 4985 if (fail == 0) 4986 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 4987 nbad); 4988 4989 /* 4990 * send bar if we dropped any frames 4991 */ 4992 if (drops) { 4993 /* Suspend the TX queue and get ready to send the BAR */ 4994 ATH_TX_LOCK(sc); 4995 ath_tx_tid_bar_suspend(sc, atid); 4996 ATH_TX_UNLOCK(sc); 4997 } 4998 4999 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5000 "%s: txa_start now %d\n", __func__, tap->txa_start); 5001 5002 ATH_TX_LOCK(sc); 5003 5004 /* Prepend all frames to the beginning of the queue */ 5005 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5006 TAILQ_REMOVE(&bf_q, bf, bf_list); 5007 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5008 } 5009 5010 /* 5011 * Reschedule to grab some further frames. 5012 */ 5013 ath_tx_tid_sched(sc, atid); 5014 5015 /* 5016 * If the queue is filtered, re-schedule as required. 5017 * 5018 * This is required as there may be a subsequent TX descriptor 5019 * for this end-node that has CLRDMASK set, so it's quite possible 5020 * that a filtered frame will be followed by a non-filtered 5021 * (complete or otherwise) frame. 5022 * 5023 * XXX should we do this before we complete the frame? 5024 */ 5025 if (atid->isfiltered) 5026 ath_tx_tid_filt_comp_complete(sc, atid); 5027 5028 finish_send_bar: 5029 5030 /* 5031 * Send BAR if required 5032 */ 5033 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5034 ath_tx_tid_bar_tx(sc, atid); 5035 5036 ATH_TX_UNLOCK(sc); 5037 5038 /* Do deferred completion */ 5039 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5040 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5041 ath_tx_default_comp(sc, bf, 0); 5042 } 5043 } 5044 5045 /* 5046 * Handle completion of unaggregated frames in an ADDBA 5047 * session. 5048 * 5049 * Fail is set to 1 if the entry is being freed via a call to 5050 * ath_tx_draintxq(). 5051 */ 5052 static void 5053 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5054 { 5055 struct ieee80211_node *ni = bf->bf_node; 5056 struct ath_node *an = ATH_NODE(ni); 5057 int tid = bf->bf_state.bfs_tid; 5058 struct ath_tid *atid = &an->an_tid[tid]; 5059 struct ath_tx_status ts; 5060 int drops = 0; 5061 5062 /* 5063 * Take a copy of this; filtering/cloning the frame may free the 5064 * bf pointer. 5065 */ 5066 ts = bf->bf_status.ds_txstat; 5067 5068 /* 5069 * Update rate control status here, before we possibly 5070 * punt to retry or cleanup. 5071 * 5072 * Do it outside of the TXQ lock. 5073 */ 5074 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5075 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5076 &bf->bf_status.ds_txstat, 5077 bf->bf_state.bfs_pktlen, 5078 1, (ts.ts_status == 0) ? 0 : 1); 5079 5080 /* 5081 * This is called early so atid->hwq_depth can be tracked. 5082 * This unfortunately means that it's released and regrabbed 5083 * during retry and cleanup. That's rather inefficient. 
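 *
 * (hwq_depth is decremented below even when we then punt to the
 * cleanup or retry paths; those paths must not decrement it again.)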
5084 */
5085 ATH_TX_LOCK(sc);
5086
5087 if (tid == IEEE80211_NONQOS_TID)
5088 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5089
5090 DPRINTF(sc, ATH_DEBUG_SW_TX,
5091 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5092 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5093 SEQNO(bf->bf_state.bfs_seqno));
5094
5095 atid->hwq_depth--;
5096 if (atid->hwq_depth < 0)
5097 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5098 __func__, atid->hwq_depth);
5099
5100 /*
5101 * If the TID is filtered, handle completing the filter
5102 * transition before potentially kicking it to the cleanup
5103 * function.
5104 */
5105 if (atid->isfiltered)
5106 ath_tx_tid_filt_comp_complete(sc, atid);
5107
5108 /*
5109 * If a cleanup is in progress, punt to comp_cleanup
5110 * rather than handling it here. It then becomes
5111 * responsible for cleaning up, calling the completion
5112 * function in net80211, etc.
5113 */
5114 if (atid->cleanup_inprogress) {
5115 if (atid->isfiltered)
5116 DPRINTF(sc, ATH_DEBUG_SW_TX,
5117 "%s: isfiltered=1, normal_comp?\n",
5118 __func__);
5119 ATH_TX_UNLOCK(sc);
5120 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5121 __func__);
5122 ath_tx_comp_cleanup_unaggr(sc, bf);
5123 return;
5124 }
5125
5126 /*
5127 * XXX TODO: how does cleanup, BAR and filtered frame handling
5128 * overlap?
5129 *
5130 * If the frame is filtered OR if it's any failure but
5131 * the TID is filtered, the frame must be added to the
5132 * filtered frame list.
5133 *
5134 * However - a busy buffer can't be added to the filtered
5135 * list as it will end up being recycled without having
5136 * been made available for the hardware.
5137 */
5138 if ((ts.ts_status & HAL_TXERR_FILT) ||
5139 (ts.ts_status != 0 && atid->isfiltered)) {
5140 int freeframe;
5141
5142 if (fail != 0)
5143 DPRINTF(sc, ATH_DEBUG_SW_TX,
5144 "%s: isfiltered=1, fail=%d\n",
5145 __func__, fail);
5146 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5147 /*
5148 * If freeframe=0 then bf is no longer ours; don't
5149 * touch it.
5150 */
5151 if (freeframe) {
5152 /* Remove from BAW */
5153 if (bf->bf_state.bfs_addedbaw)
5154 drops++;
5155 if (bf->bf_state.bfs_dobaw) {
5156 ath_tx_update_baw(sc, an, atid, bf);
5157 if (!bf->bf_state.bfs_addedbaw)
5158 DPRINTF(sc, ATH_DEBUG_SW_TX,
5159 "%s: wasn't added: seqno %d\n",
5160 __func__, SEQNO(bf->bf_state.bfs_seqno));
5161 }
5162 bf->bf_state.bfs_dobaw = 0;
5163 }
5164
5165 /*
5166 * If the frame couldn't be filtered, treat it as a drop and
5167 * prepare to send a BAR.
5168 */
5169 if (freeframe && drops)
5170 ath_tx_tid_bar_suspend(sc, atid);
5171
5172 /*
5173 * Send BAR if required
5174 */
5175 if (ath_tx_tid_bar_tx_ready(sc, atid))
5176 ath_tx_tid_bar_tx(sc, atid);
5177
5178 ATH_TX_UNLOCK(sc);
5179 /*
5180 * If freeframe is set, then the frame couldn't be
5181 * cloned and bf is still valid. Just complete/free it.
5182 */
5183 if (freeframe)
5184 ath_tx_default_comp(sc, bf, fail);
5185
5186 return;
5187 }
5188 /*
5189 * Don't bother with the retry check if all frames
5190 * are being failed (eg during queue deletion.)
5191 */
5192 #if 0
5193 if (fail == 0 && ts.ts_status & HAL_TXERR_XRETRY) {
5194 #endif
5195 if (fail == 0 && ts.ts_status != 0) {
5196 ATH_TX_UNLOCK(sc);
5197 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5198 __func__);
5199 ath_tx_aggr_retry_unaggr(sc, bf);
5200 return;
5201 }
5202
5203 /* Success?
Complete */
5204 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5205 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5206 if (bf->bf_state.bfs_dobaw) {
5207 ath_tx_update_baw(sc, an, atid, bf);
5208 bf->bf_state.bfs_dobaw = 0;
5209 if (!bf->bf_state.bfs_addedbaw)
5210 DPRINTF(sc, ATH_DEBUG_SW_TX,
5211 "%s: wasn't added: seqno %d\n",
5212 __func__, SEQNO(bf->bf_state.bfs_seqno));
5213 }
5214
5215 /*
5216 * If the queue is filtered, re-schedule as required.
5217 *
5218 * This is required as there may be a subsequent TX descriptor
5219 * for this end-node that has CLRDMASK set, so it's quite possible
5220 * that a filtered frame will be followed by a non-filtered
5221 * (complete or otherwise) frame.
5222 *
5223 * XXX should we do this before we complete the frame?
5224 */
5225 if (atid->isfiltered)
5226 ath_tx_tid_filt_comp_complete(sc, atid);
5227
5228 /*
5229 * Send BAR if required
5230 */
5231 if (ath_tx_tid_bar_tx_ready(sc, atid))
5232 ath_tx_tid_bar_tx(sc, atid);
5233
5234 ATH_TX_UNLOCK(sc);
5235
5236 ath_tx_default_comp(sc, bf, fail);
5237 /* bf is freed at this point */
5238 }
5239
5240 void
5241 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5242 {
5243 if (bf->bf_state.bfs_aggr)
5244 ath_tx_aggr_comp_aggr(sc, bf, fail);
5245 else
5246 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5247 }
5248
5249 /*
5250 * Schedule some packets from the given node/TID to the hardware.
5251 *
5252 * This is the aggregate version.
5253 */
5254 void
5255 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5256 struct ath_tid *tid)
5257 {
5258 struct ath_buf *bf;
5259 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5260 struct ieee80211_tx_ampdu *tap;
5261 ATH_AGGR_STATUS status;
5262 ath_bufhead bf_q;
5263
5264 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5265 ATH_TX_LOCK_ASSERT(sc);
5266
5267 /*
5268 * XXX TODO: If we're called for a queue that we're leaking frames to,
5269 * ensure we only leak one.
5270 */
5271
5272 tap = ath_tx_get_tx_tid(an, tid->tid);
5273
5274 if (tid->tid == IEEE80211_NONQOS_TID)
5275 DPRINTF(sc, ATH_DEBUG_SW_TX,
5276 "%s: called for TID=NONQOS_TID?\n", __func__);
5277
5278 for (;;) {
5279 status = ATH_AGGR_DONE;
5280
5281 /*
5282 * If the upper layer has paused the TID, don't
5283 * queue any further packets.
5284 *
5285 * This can also occur from the completion task because
5286 * of packet loss; but as it's serialised with this code,
5287 * it won't "appear" half way through queuing packets.
5288 */
5289 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5290 break;
5291
5292 bf = ATH_TID_FIRST(tid);
5293 if (bf == NULL) {
5294 break;
5295 }
5296
5297 /*
5298 * If the packet doesn't fall within the BAW (eg a NULL
5299 * data frame), schedule it directly; continue.
5300 */
5301 if (! bf->bf_state.bfs_dobaw) {
5302 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5303 "%s: non-baw packet\n",
5304 __func__);
5305 ATH_TID_REMOVE(tid, bf, bf_list);
5306
5307 if (bf->bf_state.bfs_nframes > 1)
5308 DPRINTF(sc, ATH_DEBUG_SW_TX,
5309 "%s: aggr=%d, nframes=%d\n",
5310 __func__,
5311 bf->bf_state.bfs_aggr,
5312 bf->bf_state.bfs_nframes);
5313
5314 /*
5315 * This shouldn't happen - such frames shouldn't
5316 * ever have been queued as an aggregate in the
5317 * first place. However, make sure the fields
5318 * are correctly set up just to be totally sure.
5319 */
5320 bf->bf_state.bfs_aggr = 0;
5321 bf->bf_state.bfs_nframes = 1;
5322
5323 /* Update CLRDMASK just before this frame is queued */
5324 ath_tx_update_clrdmask(sc, tid, bf);
5325
5326 ath_tx_do_ratelookup(sc, bf);
5327 ath_tx_calc_duration(sc, bf);
5328 ath_tx_calc_protection(sc, bf);
5329 ath_tx_set_rtscts(sc, bf);
5330 ath_tx_rate_fill_rcflags(sc, bf);
5331 ath_tx_setds(sc, bf);
5332 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5333
5334 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5335
5336 /* Queue the packet; continue */
5337 goto queuepkt;
5338 }
5339
5340 TAILQ_INIT(&bf_q);
5341
5342 /*
5343 * Do a rate control lookup on the first frame in the
5344 * list. The rate control code needs that to occur
5345 * before it can determine whether to TX.
5346 * It's inaccurate because the rate control code doesn't
5347 * really "do" aggregate lookups, so it only considers
5348 * the size of the first frame.
5349 */
5350 ath_tx_do_ratelookup(sc, bf);
5351 bf->bf_state.bfs_rc[3].rix = 0;
5352 bf->bf_state.bfs_rc[3].tries = 0;
5353
5354 ath_tx_calc_duration(sc, bf);
5355 ath_tx_calc_protection(sc, bf);
5356
5357 ath_tx_set_rtscts(sc, bf);
5358 ath_tx_rate_fill_rcflags(sc, bf);
5359
5360 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5361
5362 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5363 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5364
5365 /*
5366 * No frames to be picked up - out of BAW
5367 */
5368 if (TAILQ_EMPTY(&bf_q))
5369 break;
5370
5371 /*
5372 * This assumes that the descriptors in the ath_bufhead
5373 * are already linked together via bf_next pointers.
5374 */
5375 bf = TAILQ_FIRST(&bf_q);
5376
5377 if (status == ATH_AGGR_8K_LIMITED)
5378 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5379
5380 /*
5381 * If it's the only frame, send it as non-aggregate;
5382 * assume that ath_tx_form_aggr() has checked
5383 * whether it's in the BAW and added it appropriately.
5384 */
5385 if (bf->bf_state.bfs_nframes == 1) {
5386 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5387 "%s: single-frame aggregate\n", __func__);
5388
5389 /* Update CLRDMASK just before this frame is queued */
5390 ath_tx_update_clrdmask(sc, tid, bf);
5391
5392 bf->bf_state.bfs_aggr = 0;
5393 bf->bf_state.bfs_ndelim = 0;
5394 ath_tx_setds(sc, bf);
5395 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5396 if (status == ATH_AGGR_BAW_CLOSED)
5397 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5398 else
5399 sc->sc_aggr_stats.aggr_single_pkt++;
5400 } else {
5401 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5402 "%s: multi-frame aggregate: %d frames, "
5403 "length %d\n",
5404 __func__, bf->bf_state.bfs_nframes,
5405 bf->bf_state.bfs_al);
5406 bf->bf_state.bfs_aggr = 1;
5407 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5408 sc->sc_aggr_stats.aggr_aggr_pkt++;
5409
5410 /* Update CLRDMASK just before this frame is queued */
5411 ath_tx_update_clrdmask(sc, tid, bf);
5412
5413 /*
5414 * Calculate the duration/protection as required.
5415 */
5416 ath_tx_calc_duration(sc, bf);
5417 ath_tx_calc_protection(sc, bf);
5418
5419 /*
5420 * Update the rate and rtscts information based on the
5421 * rate decision made by the rate control code;
5422 * the first frame in the aggregate needs it.
5423 */
5424 ath_tx_set_rtscts(sc, bf);
5425
5426 /*
5427 * Setup the relevant descriptor fields
5428 * for aggregation. The first descriptor
5429 * already points to the rest in the chain.
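 *
 * (ath_tx_setds_11n() walks that bf_next chain; the single-frame
 * case above instead uses ath_tx_setds() and explicitly clears
 * the 11n aggregate state.)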
5430 */
5431 ath_tx_setds_11n(sc, bf);
5432
5433 }
5434 queuepkt:
5435 /* Set completion handler, multi-frame aggregate or not */
5436 bf->bf_comp = ath_tx_aggr_comp;
5437
5438 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5439 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5440
5441 /*
5442 * Update leak count and frame config if we're leaking frames.
5443 *
5444 * XXX TODO: it should update all frames in an aggregate
5445 * correctly!
5446 */
5447 ath_tx_leak_count_update(sc, tid, bf);
5448
5449 /* Punt to txq */
5450 ath_tx_handoff(sc, txq, bf);
5451
5452 /* Track outstanding buffer count to hardware */
5453 /* aggregates are "one" buffer */
5454 tid->hwq_depth++;
5455
5456 /*
5457 * Break out if ath_tx_form_aggr() indicated
5458 * there can't be any further progress (eg BAW is full.)
5459 * Checking for an empty txq is done above.
5460 *
5461 * XXX locking on txq here?
5462 */
5463 /* XXX TXQ locking */
5464 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5465 (status == ATH_AGGR_BAW_CLOSED ||
5466 status == ATH_AGGR_LEAK_CLOSED))
5467 break;
5468 }
5469 }
5470
5471 /*
5472 * Schedule some packets from the given node/TID to the hardware.
5473 *
5474 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5475 * It just dumps frames into the TXQ. We should limit how deep
5476 * the transmit queue can grow for frames dispatched to the given
5477 * TXQ.
5478 *
5479 * To avoid locking issues, either we need to own the TXQ lock
5480 * at this point, or we need to pass in the maximum frame count
5481 * from the caller.
5482 */
5483 void
5484 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5485 struct ath_tid *tid)
5486 {
5487 struct ath_buf *bf;
5488 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5489
5490 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5491 __func__, an, tid->tid);
5492
5493 ATH_TX_LOCK_ASSERT(sc);
5494
5495 /* Check - is AMPDU pending or running? then print out something */
5496 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5497 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5498 __func__, tid->tid);
5499 if (ath_tx_ampdu_running(sc, an, tid->tid))
5500 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5501 __func__, tid->tid);
5502
5503 for (;;) {
5504
5505 /*
5506 * If the upper layers have paused the TID, don't
5507 * queue any further packets.
5508 *
5509 * XXX if we are leaking frames, make sure we decrement
5510 * that counter _and_ we continue here.
5511 */
5512 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5513 break;
5514
5515 bf = ATH_TID_FIRST(tid);
5516 if (bf == NULL) {
5517 break;
5518 }
5519
5520 ATH_TID_REMOVE(tid, bf, bf_list);
5521
5522 /* Sanity check! */
5523 if (tid->tid != bf->bf_state.bfs_tid) {
5524 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5525 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5526 tid->tid);
5527 }
5528 /* Normal completion handler */
5529 bf->bf_comp = ath_tx_normal_comp;
5530
5531 /*
5532 * Override this for now, until the non-aggregate
5533 * completion handler correctly handles software retransmits.
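 *
 * (This is the CLRDMASK override that the ath_tx_normal_comp()
 * comment above requires of anyone queuing frames via this path.)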
5534 */ 5535 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5536 5537 /* Update CLRDMASK just before this frame is queued */ 5538 ath_tx_update_clrdmask(sc, tid, bf); 5539 5540 /* Program descriptors + rate control */ 5541 ath_tx_do_ratelookup(sc, bf); 5542 ath_tx_calc_duration(sc, bf); 5543 ath_tx_calc_protection(sc, bf); 5544 ath_tx_set_rtscts(sc, bf); 5545 ath_tx_rate_fill_rcflags(sc, bf); 5546 ath_tx_setds(sc, bf); 5547 5548 /* 5549 * Update the current leak count if 5550 * we're leaking frames; and set the 5551 * MORE flag as appropriate. 5552 */ 5553 ath_tx_leak_count_update(sc, tid, bf); 5554 5555 /* Track outstanding buffer count to hardware */ 5556 /* aggregates are "one" buffer */ 5557 tid->hwq_depth++; 5558 5559 /* Punt to hardware or software txq */ 5560 ath_tx_handoff(sc, txq, bf); 5561 } 5562 } 5563 5564 /* 5565 * Schedule some packets to the given hardware queue. 5566 * 5567 * This function walks the list of TIDs (ie, ath_node TIDs 5568 * with queued traffic) and attempts to schedule traffic 5569 * from them. 5570 * 5571 * TID scheduling is implemented as a FIFO, with TIDs being 5572 * added to the end of the queue after some frames have been 5573 * scheduled. 5574 */ 5575 void 5576 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5577 { 5578 struct ath_tid *tid, *next, *last; 5579 5580 ATH_TX_LOCK_ASSERT(sc); 5581 5582 /* 5583 * Don't schedule if the hardware queue is busy. 5584 * This (hopefully) gives some more time to aggregate 5585 * some packets in the aggregation queue. 5586 * 5587 * XXX It doesn't stop a parallel sender from sneaking 5588 * in transmitting a frame! 5589 */ 5590 /* XXX TXQ locking */ 5591 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5592 sc->sc_aggr_stats.aggr_sched_nopkt++; 5593 return; 5594 } 5595 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5596 sc->sc_aggr_stats.aggr_sched_nopkt++; 5597 return; 5598 } 5599 5600 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5601 5602 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5603 /* 5604 * Suspend paused queues here; they'll be resumed 5605 * once the addba completes or times out. 5606 */ 5607 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5608 __func__, tid->tid, tid->paused); 5609 ath_tx_tid_unsched(sc, tid); 5610 /* 5611 * This node may be in power-save and we're leaking 5612 * a frame; be careful. 5613 */ 5614 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 5615 goto loop_done; 5616 } 5617 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5618 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5619 else 5620 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5621 5622 /* Not empty? Re-schedule */ 5623 if (tid->axq_depth != 0) 5624 ath_tx_tid_sched(sc, tid); 5625 5626 /* 5627 * Give the software queue time to aggregate more 5628 * packets. If we aren't running aggregation then 5629 * we should still limit the hardware queue depth. 5630 */ 5631 /* XXX TXQ locking */ 5632 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5633 break; 5634 } 5635 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5636 break; 5637 } 5638 loop_done: 5639 /* 5640 * If this was the last entry on the original list, stop. 5641 * Otherwise nodes that have been rescheduled onto the end 5642 * of the TID FIFO list will just keep being rescheduled. 5643 * 5644 * XXX What should we do about nodes that were paused 5645 * but are pending a leaking frame in response to a ps-poll? 
/*
 * TX addba handling
 */

/*
 * Return the net80211 TID struct pointer, or NULL for none.
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running for the given TID?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not running */

	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending for the given TID?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not pending */

	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
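/*
 * Illustrative sketch only: these helpers are how the TX path decides
 * between the aggregate and normal dispatch routines (see
 * ath_txq_sched() above).  A hypothetical caller fragment that kicks
 * off ADDBA negotiation when a session is neither running nor pending
 * might look like this:
 */
#if 0
	struct ieee80211_tx_ampdu *tap;

	if (! ath_tx_ampdu_running(sc, an, tid) &&
	    ! ath_tx_ampdu_pending(sc, an, tid) &&
	    (tap = ath_tx_get_tx_tid(an, tid)) != NULL)
		(void) ieee80211_ampdu_request(&an->an_node, tap);
#endif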
/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	/*
	 * XXX danger Will Robinson!
	 *
	 * Although the taskqueue may be running and scheduling some more
	 * packets, these should all be _before_ the addba sequence number.
	 * However, net80211 will keep self-assigning sequence numbers
	 * until addba has been negotiated.
	 *
	 * In the past, these packets would be "paused" (which still works
	 * fine, as they're being scheduled to the driver in the same
	 * serialised method which is calling the addba request routine)
	 * and when the aggregation session begins, they'll be dequeued
	 * as aggregate packets and added to the BAW.  However, now there's
	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
	 * packets.  Thus they never get included in the BAW tracking and
	 * this can cause the initial burst of packets after the addba
	 * negotiation to "hang", as they quickly fall outside the BAW.
	 *
	 * The "eventual" solution should be to tag these packets with
	 * dobaw.  Although net80211 has given us a sequence number,
	 * it'll be "after" the left edge of the BAW and thus it'll
	 * fall within it.
	 */
	ATH_TX_LOCK(sc);
	/*
	 * This is a bit annoying.  Until net80211 HT code inherits some
	 * (any) locking, we may have this called in parallel BUT only
	 * one response/timeout will be called.  Grr.
	 */
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    dialogtoken, baparamset, batimeout);
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}
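/*
 * Illustrative sketch only: the "eventual" solution described above
 * would tag frames queued whilst ADDBA negotiation is outstanding
 * with bfs_dobaw, so they're BAW-tracked once the session starts.
 * This is a hypothetical fragment of the queue path, not current
 * driver behaviour:
 */
#if 0
	/* At queue time, with the TX lock held: */
	if (atid->addba_tx_pending) {
		/*
		 * net80211's self-assigned seqno will sit at or after
		 * the negotiated BAW left edge, so it's safe to track.
		 */
		bf->bf_state.bfs_dobaw = 1;
	}
#endif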
/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be "aggregate" (whether
 * they're sent as part of an A-MPDU or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated.  This means the initially-negotiated BAW left
 * edge won't match ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW.  For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int r;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
	    ni->ni_macaddr,
	    ":",
	    status, code, batimeout);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/*
	 * Call this first, so the interface flags get updated
	 * before the TID is unpaused.  Otherwise a race condition
	 * exists where the unpaused TID still doesn't yet have
	 * IEEE80211_AGGR_RUNNING set.
	 */
	r = sc->sc_addba_response(ni, tap, status, code, batimeout);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	/*
	 * XXX dirty!
	 * Slide the BAW left edge to wherever net80211 left it for us.
	 * Read above for more information.
	 */
	tap->txa_start = ni->ni_txseqs[tid];
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
	return r;
}

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Unblock the pending BAR held traffic, if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path.  This quietens
		 * the warning.  It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);

	/*
	 * In case there's a followup call to this, only call it
	 * if we don't have a cleanup in progress.
	 *
	 * Since we've paused the queue above, we need to make
	 * sure we unpause if there's already a cleanup in
	 * progress - it means something else is also doing
	 * this stuff, so we don't need to also keep it paused.
	 */
	if (atid->cleanup_inprogress) {
		ath_tx_tid_resume(sc, atid);
	} else {
		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! atid->cleanup_inprogress)
			ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: TID %d: cleaning up TID\n",
		    __func__,
		    an->an_node.ni_macaddr,
		    ":",
		    i);
		/*
		 * In case there's a followup call to this, only call it
		 * if we don't have a cleanup in progress.
		 */
		if (! tid->cleanup_inprogress) {
			ath_tx_tid_pause(sc, tid);
			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
			/*
			 * Unpause the TID if no cleanup is required.
			 */
			if (! tid->cleanup_inprogress)
				ath_tx_tid_resume(sc, tid);
		}
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
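/*
 * Illustrative sketch only: ath_addba_stop() and ath_tx_node_reassoc()
 * both finish with the same "drain the completion list and fail each
 * frame" loop.  A hypothetical shared helper (the name is made up)
 * would look like this:
 */
#if 0
static void
ath_tx_fail_comp_list(struct ath_softc *sc, ath_bufhead *bf_cq)
{
	struct ath_buf *bf;

	/* Must be called with the TX lock not held */
	while ((bf = TAILQ_FIRST(bf_cq)) != NULL) {
		TAILQ_REMOVE(bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);	/* fail=1 */
	}
}
#endif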
/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 * XXX (A named-constant sketch follows this function.)
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things up when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly
	 * XXX what we want!
	 *
	 * XXX So for now, do this inside the TX lock and just correct it
	 * XXX afterwards.  The below condition should never happen and
	 * XXX if it does, all kinds of things need fixing.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch?  (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}
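/*
 * Illustrative sketch only: the magic "50" above appears to mirror
 * net80211's (tunable) BAR retry limit.  A named local constant
 * (hypothetical, not part of the driver) would at least document
 * that coupling:
 */
#if 0
#define	ATH_BAR_MAX_ATTEMPTS	50	/* keep in sync with net80211 */

	if (status == 0 || attempts == ATH_BAR_MAX_ATTEMPTS) {
		/* ... unsuspend BAR as above ... */
	}
#endif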
/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; which reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* !? */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
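/*
 * Illustrative sketch only: an alternate TX DMA implementation plugs
 * in by filling the same sc_tx method table from its own attach path.
 * The ath_foo_* names below are made up for illustration; the real
 * EDMA variants live elsewhere in the driver.
 */
#if 0
void
ath_xmit_setup_foo(struct ath_softc *sc)
{

	/* A real implementation would query the HAL for these sizes */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;

	sc->sc_tx.xmit_setup = ath_foo_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_foo_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_foo_attach_comp_func;
	sc->sc_tx.xmit_dma_restart = ath_foo_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_foo_xmit_handoff;
	sc->sc_tx.xmit_drain = ath_foo_tx_drain;
}
#endif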