/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
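 *
 * (The routines below make up the transmit side: DMA setup, descriptor
 * chaining, rate control plumbing and the handoff of frames to the
 * hardware and software queues.)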
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0!
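		 *
		 * (With bf_nseg == 0 the descriptor count calculation
		 * below would still round up to one and a bogus
		 * descriptor would be posted; hence the explicit
		 * bail-out that follows.)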
		 */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif	/* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames get mapped to a TID so frames consistently
 * go on a sensible queue.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;

	wh = mtod(m0, const struct ieee80211_frame *);

	/* Non-QoS: map frame to a TID queue for software queueing */
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return (WME_AC_TO_TID(M_WME_GETAC(m0)));

	/* QoS - fetch the TID from the header, ignore mbuf WME */
	return (ieee80211_gettid(wh));
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * For QoS frames, obey the TID.  That way things like
 * management frames that are related to a given TID
 * are thus serialised with the rest of the TID traffic,
 * regardless of net80211 overriding priority.
 *
 * For non-QoS frames, return the mbuf WME priority.
 *
 * This has implications that higher priority non-QoS traffic
 * may end up being scheduled before other non-QoS traffic,
 * leading to out-of-sequence packets being emitted.
 *
 * (It'd be nice to log/count this so we can see if it
 * really is a problem.)
 *
 * TODO: maybe we should throw multicast traffic, QoS or
 * otherwise, into a separate TX queue?
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;

	wh = mtod(m0, const struct ieee80211_frame *);

	/*
	 * QoS data frame (sequence number or otherwise) -
	 * return hardware queue mapping for the underlying
	 * TID.
	 */
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return TID_TO_WME_AC(ieee80211_gettid(wh));

	/*
	 * Otherwise - return mbuf QoS pri.
	 */
	return (M_WME_GETAC(m0));
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
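 *
 * (The fragments beyond the first hang off m0 via m_nextpkt; each
 * one is given its own ath_buf and its own node reference here,
 * and ath_txfrag_cleanup() above undoes both on allocation failure.)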
273 */ 274 int 275 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 276 struct mbuf *m0, struct ieee80211_node *ni) 277 { 278 struct mbuf *m; 279 struct ath_buf *bf; 280 281 ATH_TXBUF_LOCK(sc); 282 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 283 /* XXX non-management? */ 284 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 285 if (bf == NULL) { /* out of buffers, cleanup */ 286 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", 287 __func__); 288 ath_txfrag_cleanup(sc, frags, ni); 289 break; 290 } 291 ieee80211_node_incref(ni); 292 TAILQ_INSERT_TAIL(frags, bf, bf_list); 293 } 294 ATH_TXBUF_UNLOCK(sc); 295 296 return !TAILQ_EMPTY(frags); 297 } 298 299 static int 300 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 301 { 302 struct mbuf *m; 303 int error; 304 305 /* 306 * Load the DMA map so any coalescing is done. This 307 * also calculates the number of descriptors we need. 308 */ 309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 310 bf->bf_segs, &bf->bf_nseg, 311 BUS_DMA_NOWAIT); 312 if (error == EFBIG) { 313 /* XXX packet requires too many descriptors */ 314 bf->bf_nseg = ATH_MAX_SCATTER + 1; 315 } else if (error != 0) { 316 sc->sc_stats.ast_tx_busdma++; 317 ieee80211_free_mbuf(m0); 318 return error; 319 } 320 /* 321 * Discard null packets and check for packets that 322 * require too many TX descriptors. We try to convert 323 * the latter to a cluster. 324 */ 325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ 326 sc->sc_stats.ast_tx_linear++; 327 m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); 328 if (m == NULL) { 329 ieee80211_free_mbuf(m0); 330 sc->sc_stats.ast_tx_nombuf++; 331 return ENOMEM; 332 } 333 m0 = m; 334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 335 bf->bf_segs, &bf->bf_nseg, 336 BUS_DMA_NOWAIT); 337 if (error != 0) { 338 sc->sc_stats.ast_tx_busdma++; 339 ieee80211_free_mbuf(m0); 340 return error; 341 } 342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, 343 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 344 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 345 sc->sc_stats.ast_tx_nodata++; 346 ieee80211_free_mbuf(m0); 347 return EIO; 348 } 349 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 350 __func__, m0, m0->m_pkthdr.len); 351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 352 bf->bf_m = m0; 353 354 return 0; 355 } 356 357 /* 358 * Chain together segments+descriptors for a frame - 11n or otherwise. 359 * 360 * For aggregates, this is called on each frame in the aggregate. 361 */ 362 static void 363 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, 364 struct ath_buf *bf, int is_aggr, int is_first_subframe, 365 int is_last_subframe) 366 { 367 struct ath_hal *ah = sc->sc_ah; 368 char *ds; 369 int i, bp, dsp; 370 HAL_DMA_ADDR bufAddrList[4]; 371 uint32_t segLenList[4]; 372 int numTxMaps = 1; 373 int isFirstDesc = 1; 374 375 /* 376 * XXX There's txdma and txdma_mgmt; the descriptor 377 * sizes must match. 378 */ 379 struct ath_descdma *dd = &sc->sc_txdma; 380 381 /* 382 * Fillin the remainder of the descriptor info. 383 */ 384 385 /* 386 * We need the number of TX data pointers in each descriptor. 387 * EDMA and later chips support 4 TX buffers per descriptor; 388 * previous chips just support one. 389 */ 390 numTxMaps = sc->sc_tx_nmaps; 391 392 /* 393 * For EDMA and later chips ensure the TX map is fully populated 394 * before advancing to the next descriptor. 
395 */ 396 ds = (char *) bf->bf_desc; 397 bp = dsp = 0; 398 bzero(bufAddrList, sizeof(bufAddrList)); 399 bzero(segLenList, sizeof(segLenList)); 400 for (i = 0; i < bf->bf_nseg; i++) { 401 bufAddrList[bp] = bf->bf_segs[i].ds_addr; 402 segLenList[bp] = bf->bf_segs[i].ds_len; 403 bp++; 404 405 /* 406 * Go to the next segment if this isn't the last segment 407 * and there's space in the current TX map. 408 */ 409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 410 continue; 411 412 /* 413 * Last segment or we're out of buffer pointers. 414 */ 415 bp = 0; 416 417 if (i == bf->bf_nseg - 1) 418 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 419 else 420 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 421 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 422 423 /* 424 * XXX This assumes that bfs_txq is the actual destination 425 * hardware queue at this point. It may not have been 426 * assigned, it may actually be pointing to the multicast 427 * software TXQ id. These must be fixed! 428 */ 429 ath_hal_filltxdesc(ah, (struct ath_desc *) ds 430 , bufAddrList 431 , segLenList 432 , bf->bf_descid /* XXX desc id */ 433 , bf->bf_state.bfs_tx_queue 434 , isFirstDesc /* first segment */ 435 , i == bf->bf_nseg - 1 /* last segment */ 436 , (struct ath_desc *) ds0 /* first descriptor */ 437 ); 438 439 /* 440 * Make sure the 11n aggregate fields are cleared. 441 * 442 * XXX TODO: this doesn't need to be called for 443 * aggregate frames; as it'll be called on all 444 * sub-frames. Since the descriptors are in 445 * non-cacheable memory, this leads to some 446 * rather slow writes on MIPS/ARM platforms. 447 */ 448 if (ath_tx_is_11n(sc)) 449 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 450 451 /* 452 * If 11n is enabled, set it up as if it's an aggregate 453 * frame. 454 */ 455 if (is_last_subframe) { 456 ath_hal_set11n_aggr_last(sc->sc_ah, 457 (struct ath_desc *) ds); 458 } else if (is_aggr) { 459 /* 460 * This clears the aggrlen field; so 461 * the caller needs to call set_aggr_first()! 462 * 463 * XXX TODO: don't call this for the first 464 * descriptor in the first frame in an 465 * aggregate! 466 */ 467 ath_hal_set11n_aggr_middle(sc->sc_ah, 468 (struct ath_desc *) ds, 469 bf->bf_state.bfs_ndelim); 470 } 471 isFirstDesc = 0; 472 bf->bf_lastds = (struct ath_desc *) ds; 473 474 /* 475 * Don't forget to skip to the next descriptor. 476 */ 477 ds += sc->sc_tx_desclen; 478 dsp++; 479 480 /* 481 * .. and don't forget to blank these out! 482 */ 483 bzero(bufAddrList, sizeof(bufAddrList)); 484 bzero(segLenList, sizeof(segLenList)); 485 } 486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 487 } 488 489 /* 490 * Set the rate control fields in the given descriptor based on 491 * the bf_state fields and node state. 492 * 493 * The bfs fields should already be set with the relevant rate 494 * control information, including whether MRR is to be enabled. 495 * 496 * Since the FreeBSD HAL currently sets up the first TX rate 497 * in ath_hal_setuptxdesc(), this will setup the MRR 498 * conditionally for the pre-11n chips, and call ath_buf_set_rate 499 * unconditionally for 11n chips. These require the 11n rate 500 * scenario to be set if MCS rates are enabled, so it's easier 501 * to just always call it. The caller can then only set rates 2, 3 502 * and 4 if multi-rate retry is needed. 
503 */ 504 static void 505 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 506 struct ath_buf *bf) 507 { 508 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 509 510 /* If mrr is disabled, blank tries 1, 2, 3 */ 511 if (! bf->bf_state.bfs_ismrr) 512 rc[1].tries = rc[2].tries = rc[3].tries = 0; 513 514 #if 0 515 /* 516 * If NOACK is set, just set ntries=1. 517 */ 518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 519 rc[1].tries = rc[2].tries = rc[3].tries = 0; 520 rc[0].tries = 1; 521 } 522 #endif 523 524 /* 525 * Always call - that way a retried descriptor will 526 * have the MRR fields overwritten. 527 * 528 * XXX TODO: see if this is really needed - setting up 529 * the first descriptor should set the MRR fields to 0 530 * for us anyway. 531 */ 532 if (ath_tx_is_11n(sc)) { 533 ath_buf_set_rate(sc, ni, bf); 534 } else { 535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 536 , rc[1].ratecode, rc[1].tries 537 , rc[2].ratecode, rc[2].tries 538 , rc[3].ratecode, rc[3].tries 539 ); 540 } 541 } 542 543 /* 544 * Setup segments+descriptors for an 11n aggregate. 545 * bf_first is the first buffer in the aggregate. 546 * The descriptor list must already been linked together using 547 * bf->bf_next. 548 */ 549 static void 550 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 551 { 552 struct ath_buf *bf, *bf_prev = NULL; 553 struct ath_desc *ds0 = bf_first->bf_desc; 554 555 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 556 __func__, bf_first->bf_state.bfs_nframes, 557 bf_first->bf_state.bfs_al); 558 559 bf = bf_first; 560 561 if (bf->bf_state.bfs_txrate0 == 0) 562 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 563 __func__, bf, 0); 564 if (bf->bf_state.bfs_rc[0].ratecode == 0) 565 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 566 __func__, bf, 0); 567 568 /* 569 * Setup all descriptors of all subframes - this will 570 * call ath_hal_set11naggrmiddle() on every frame. 571 */ 572 while (bf != NULL) { 573 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 574 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 576 SEQNO(bf->bf_state.bfs_seqno)); 577 578 /* 579 * Setup the initial fields for the first descriptor - all 580 * the non-11n specific stuff. 581 */ 582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 583 , bf->bf_state.bfs_pktlen /* packet length */ 584 , bf->bf_state.bfs_hdrlen /* header length */ 585 , bf->bf_state.bfs_atype /* Atheros packet type */ 586 , bf->bf_state.bfs_txpower /* txpower */ 587 , bf->bf_state.bfs_txrate0 588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 589 , bf->bf_state.bfs_keyix /* key cache index */ 590 , bf->bf_state.bfs_txantenna /* antenna mode */ 591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 594 ); 595 596 /* 597 * First descriptor? Setup the rate control and initial 598 * aggregate header information. 599 */ 600 if (bf == bf_first) { 601 /* 602 * setup first desc with rate and aggr info 603 */ 604 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 605 } 606 607 /* 608 * Setup the descriptors for a multi-descriptor frame. 609 * This is both aggregate and non-aggregate aware. 610 */ 611 ath_tx_chaindesclist(sc, ds0, bf, 612 1, /* is_aggr */ 613 !! (bf == bf_first), /* is_first_subframe */ 614 !! 
		    (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cachable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
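	 *
	 * (Note this is only a diagnostic; on a mismatch the frame is
	 * still appended to whichever software queue was passed in.)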
708 */ 709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 710 DPRINTF(sc, ATH_DEBUG_XMIT, 711 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 712 __func__, bf, bf->bf_state.bfs_tx_queue, 713 txq->axq_qnum); 714 } 715 716 ATH_TXQ_LOCK(txq); 717 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 718 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 719 struct ieee80211_frame *wh; 720 721 /* mark previous frame */ 722 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 723 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 724 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 725 BUS_DMASYNC_PREWRITE); 726 727 /* link descriptor */ 728 ath_hal_settxdesclink(sc->sc_ah, 729 bf_last->bf_lastds, 730 bf->bf_daddr); 731 } 732 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 733 ATH_TXQ_UNLOCK(txq); 734 } 735 736 /* 737 * Hand-off packet to a hardware queue. 738 */ 739 static void 740 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 741 struct ath_buf *bf) 742 { 743 struct ath_hal *ah = sc->sc_ah; 744 struct ath_buf *bf_first; 745 746 /* 747 * Insert the frame on the outbound list and pass it on 748 * to the hardware. Multicast frames buffered for power 749 * save stations and transmit from the CAB queue are stored 750 * on a s/w only queue and loaded on to the CAB queue in 751 * the SWBA handler since frames only go out on DTIM and 752 * to avoid possible races. 753 */ 754 ATH_TX_LOCK_ASSERT(sc); 755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 756 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 758 ("ath_tx_handoff_hw called for mcast queue")); 759 760 /* 761 * XXX We should instead just verify that sc_txstart_cnt 762 * or ath_txproc_cnt > 0. That would mean that 763 * the reset is going to be waiting for us to complete. 764 */ 765 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { 766 device_printf(sc->sc_dev, 767 "%s: TX dispatch without holding txcount/txstart refcnt!\n", 768 __func__); 769 } 770 771 /* 772 * XXX .. this is going to cause the hardware to get upset; 773 * so we really should find some way to drop or queue 774 * things. 775 */ 776 777 ATH_TXQ_LOCK(txq); 778 779 /* 780 * XXX TODO: if there's a holdingbf, then 781 * ATH_TXQ_PUTRUNNING should be clear. 782 * 783 * If there is a holdingbf and the list is empty, 784 * then axq_link should be pointing to the holdingbf. 785 * 786 * Otherwise it should point to the last descriptor 787 * in the last ath_buf. 788 * 789 * In any case, we should really ensure that we 790 * update the previous descriptor link pointer to 791 * this descriptor, regardless of all of the above state. 792 * 793 * For now this is captured by having axq_link point 794 * to either the holdingbf (if the TXQ list is empty) 795 * or the end of the list (if the TXQ list isn't empty.) 796 * I'd rather just kill axq_link here and do it as above. 797 */ 798 799 /* 800 * Append the frame to the TX queue. 801 */ 802 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 803 ATH_KTR(sc, ATH_KTR_TX, 3, 804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 805 "depth=%d", 806 txq->axq_qnum, 807 bf, 808 txq->axq_depth); 809 810 /* 811 * If there's a link pointer, update it. 812 * 813 * XXX we should replace this with the above logic, just 814 * to kill axq_link with fire. 
815 */ 816 if (txq->axq_link != NULL) { 817 *txq->axq_link = bf->bf_daddr; 818 DPRINTF(sc, ATH_DEBUG_XMIT, 819 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 820 txq->axq_qnum, txq->axq_link, 821 (caddr_t)bf->bf_daddr, bf->bf_desc, 822 txq->axq_depth); 823 ATH_KTR(sc, ATH_KTR_TX, 5, 824 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 825 "lastds=%d", 826 txq->axq_qnum, txq->axq_link, 827 (caddr_t)bf->bf_daddr, bf->bf_desc, 828 bf->bf_lastds); 829 } 830 831 /* 832 * If we've not pushed anything into the hardware yet, 833 * push the head of the queue into the TxDP. 834 * 835 * Once we've started DMA, there's no guarantee that 836 * updating the TxDP with a new value will actually work. 837 * So we just don't do that - if we hit the end of the list, 838 * we keep that buffer around (the "holding buffer") and 839 * re-start DMA by updating the link pointer of _that_ 840 * descriptor and then restart DMA. 841 */ 842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 843 bf_first = TAILQ_FIRST(&txq->axq_q); 844 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 846 DPRINTF(sc, ATH_DEBUG_XMIT, 847 "%s: TXDP[%u] = %p (%p) depth %d\n", 848 __func__, txq->axq_qnum, 849 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 850 txq->axq_depth); 851 ATH_KTR(sc, ATH_KTR_TX, 5, 852 "ath_tx_handoff: TXDP[%u] = %p (%p) " 853 "lastds=%p depth %d", 854 txq->axq_qnum, 855 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 856 bf_first->bf_lastds, 857 txq->axq_depth); 858 } 859 860 /* 861 * Ensure that the bf TXQ matches this TXQ, so later 862 * checking and holding buffer manipulation is sane. 863 */ 864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 865 DPRINTF(sc, ATH_DEBUG_XMIT, 866 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 867 __func__, bf, bf->bf_state.bfs_tx_queue, 868 txq->axq_qnum); 869 } 870 871 /* 872 * Track aggregate queue depth. 873 */ 874 if (bf->bf_state.bfs_aggr) 875 txq->axq_aggr_depth++; 876 877 /* 878 * Update the link pointer. 879 */ 880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 881 882 /* 883 * Start DMA. 884 * 885 * If we wrote a TxDP above, DMA will start from here. 886 * 887 * If DMA is running, it'll do nothing. 888 * 889 * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 890 * or VEOL) then it stops at the last transmitted write. 891 * We then append a new frame by updating the link pointer 892 * in that descriptor and then kick TxE here; it will re-read 893 * that last descriptor and find the new descriptor to transmit. 894 * 895 * This is why we keep the holding descriptor around. 896 */ 897 ath_hal_txstart(ah, txq->axq_qnum); 898 ATH_TXQ_UNLOCK(txq); 899 ATH_KTR(sc, ATH_KTR_TX, 1, 900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 901 } 902 903 /* 904 * Restart TX DMA for the given TXQ. 905 * 906 * This must be called whether the queue is empty or not. 
907 */ 908 static void 909 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 910 { 911 struct ath_buf *bf, *bf_last; 912 913 ATH_TXQ_LOCK_ASSERT(txq); 914 915 /* XXX make this ATH_TXQ_FIRST */ 916 bf = TAILQ_FIRST(&txq->axq_q); 917 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 918 919 if (bf == NULL) 920 return; 921 922 DPRINTF(sc, ATH_DEBUG_RESET, 923 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 924 __func__, 925 txq->axq_qnum, 926 bf, 927 bf_last, 928 (uint32_t) bf->bf_daddr); 929 930 #ifdef ATH_DEBUG 931 if (sc->sc_debug & ATH_DEBUG_RESET) 932 ath_tx_dump(sc, txq); 933 #endif 934 935 /* 936 * This is called from a restart, so DMA is known to be 937 * completely stopped. 938 */ 939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 940 ("%s: Q%d: called with PUTRUNNING=1\n", 941 __func__, 942 txq->axq_qnum)); 943 944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 945 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 946 947 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 948 &txq->axq_link); 949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 950 } 951 952 /* 953 * Hand off a packet to the hardware (or mcast queue.) 954 * 955 * The relevant hardware txq should be locked. 956 */ 957 static void 958 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 959 struct ath_buf *bf) 960 { 961 ATH_TX_LOCK_ASSERT(sc); 962 963 #ifdef ATH_DEBUG_ALQ 964 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 965 ath_tx_alq_post(sc, bf); 966 #endif 967 968 if (txq->axq_qnum == ATH_TXQ_SWQ) 969 ath_tx_handoff_mcast(sc, txq, bf); 970 else 971 ath_tx_handoff_hw(sc, txq, bf); 972 } 973 974 static int 975 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 976 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 977 int *keyix) 978 { 979 DPRINTF(sc, ATH_DEBUG_XMIT, 980 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 981 __func__, 982 *hdrlen, 983 *pktlen, 984 isfrag, 985 iswep, 986 m0); 987 988 if (iswep) { 989 const struct ieee80211_cipher *cip; 990 struct ieee80211_key *k; 991 992 /* 993 * Construct the 802.11 header+trailer for an encrypted 994 * frame. The only reason this can fail is because of an 995 * unknown or unsupported cipher/key type. 996 */ 997 k = ieee80211_crypto_encap(ni, m0); 998 if (k == NULL) { 999 /* 1000 * This can happen when the key is yanked after the 1001 * frame was queued. Just discard the frame; the 1002 * 802.11 layer counts failures and provides 1003 * debugging/diagnostics. 1004 */ 1005 return (0); 1006 } 1007 /* 1008 * Adjust the packet + header lengths for the crypto 1009 * additions and calculate the h/w key index. When 1010 * a s/w mic is done the frame will have had any mic 1011 * added to it prior to entry so m0->m_pkthdr.len will 1012 * account for it. Otherwise we need to add it to the 1013 * packet length. 1014 */ 1015 cip = k->wk_cipher; 1016 (*hdrlen) += cip->ic_header; 1017 (*pktlen) += cip->ic_header + cip->ic_trailer; 1018 /* NB: frags always have any TKIP MIC done in s/w */ 1019 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1020 (*pktlen) += cip->ic_miclen; 1021 (*keyix) = k->wk_keyix; 1022 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1023 /* 1024 * Use station key cache slot, if assigned. 
1025 */ 1026 (*keyix) = ni->ni_ucastkey.wk_keyix; 1027 if ((*keyix) == IEEE80211_KEYIX_NONE) 1028 (*keyix) = HAL_TXKEYIX_INVALID; 1029 } else 1030 (*keyix) = HAL_TXKEYIX_INVALID; 1031 1032 return (1); 1033 } 1034 1035 /* 1036 * Calculate whether interoperability protection is required for 1037 * this frame. 1038 * 1039 * This requires the rate control information be filled in, 1040 * as the protection requirement depends upon the current 1041 * operating mode / PHY. 1042 */ 1043 static void 1044 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1045 { 1046 struct ieee80211_frame *wh; 1047 uint8_t rix; 1048 uint16_t flags; 1049 int shortPreamble; 1050 const HAL_RATE_TABLE *rt = sc->sc_currates; 1051 struct ieee80211com *ic = &sc->sc_ic; 1052 1053 flags = bf->bf_state.bfs_txflags; 1054 rix = bf->bf_state.bfs_rc[0].rix; 1055 shortPreamble = bf->bf_state.bfs_shpream; 1056 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1057 1058 /* Disable frame protection for TOA probe frames */ 1059 if (bf->bf_flags & ATH_BUF_TOA_PROBE) { 1060 /* XXX count */ 1061 flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA); 1062 bf->bf_state.bfs_doprot = 0; 1063 goto finish; 1064 } 1065 1066 /* 1067 * If 802.11g protection is enabled, determine whether 1068 * to use RTS/CTS or just CTS. Note that this is only 1069 * done for OFDM unicast frames. 1070 */ 1071 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1072 rt->info[rix].phy == IEEE80211_T_OFDM && 1073 (flags & HAL_TXDESC_NOACK) == 0) { 1074 bf->bf_state.bfs_doprot = 1; 1075 /* XXX fragments must use CCK rates w/ protection */ 1076 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1077 flags |= HAL_TXDESC_RTSENA; 1078 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1079 flags |= HAL_TXDESC_CTSENA; 1080 } 1081 /* 1082 * For frags it would be desirable to use the 1083 * highest CCK rate for RTS/CTS. But stations 1084 * farther away may detect it at a lower CCK rate 1085 * so use the configured protection rate instead 1086 * (for now). 1087 */ 1088 sc->sc_stats.ast_tx_protect++; 1089 } 1090 1091 /* 1092 * If 11n protection is enabled and it's a HT frame, 1093 * enable RTS. 1094 * 1095 * XXX ic_htprotmode or ic_curhtprotmode? 1096 * XXX should it_htprotmode only matter if ic_curhtprotmode 1097 * XXX indicates it's not a HT pure environment? 1098 */ 1099 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1100 rt->info[rix].phy == IEEE80211_T_HT && 1101 (flags & HAL_TXDESC_NOACK) == 0) { 1102 flags |= HAL_TXDESC_RTSENA; 1103 sc->sc_stats.ast_tx_htprotect++; 1104 } 1105 1106 finish: 1107 bf->bf_state.bfs_txflags = flags; 1108 } 1109 1110 /* 1111 * Update the frame duration given the currently selected rate. 1112 * 1113 * This also updates the frame duration value, so it will require 1114 * a DMA flush. 1115 */ 1116 static void 1117 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1118 { 1119 struct ieee80211_frame *wh; 1120 uint8_t rix; 1121 uint16_t flags; 1122 int shortPreamble; 1123 struct ath_hal *ah = sc->sc_ah; 1124 const HAL_RATE_TABLE *rt = sc->sc_currates; 1125 int isfrag = bf->bf_m->m_flags & M_FRAG; 1126 1127 flags = bf->bf_state.bfs_txflags; 1128 rix = bf->bf_state.bfs_rc[0].rix; 1129 shortPreamble = bf->bf_state.bfs_shpream; 1130 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1131 1132 /* 1133 * Calculate duration. This logically belongs in the 802.11 1134 * layer but it lacks sufficient information to calculate it. 
1135 */ 1136 if ((flags & HAL_TXDESC_NOACK) == 0 && 1137 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1138 u_int16_t dur; 1139 if (shortPreamble) 1140 dur = rt->info[rix].spAckDuration; 1141 else 1142 dur = rt->info[rix].lpAckDuration; 1143 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1144 dur += dur; /* additional SIFS+ACK */ 1145 /* 1146 * Include the size of next fragment so NAV is 1147 * updated properly. The last fragment uses only 1148 * the ACK duration 1149 * 1150 * XXX TODO: ensure that the rate lookup for each 1151 * fragment is the same as the rate used by the 1152 * first fragment! 1153 */ 1154 dur += ath_hal_computetxtime(ah, 1155 rt, 1156 bf->bf_nextfraglen, 1157 rix, shortPreamble, 1158 AH_TRUE); 1159 } 1160 if (isfrag) { 1161 /* 1162 * Force hardware to use computed duration for next 1163 * fragment by disabling multi-rate retry which updates 1164 * duration based on the multi-rate duration table. 1165 */ 1166 bf->bf_state.bfs_ismrr = 0; 1167 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1168 /* XXX update bfs_rc[0].try? */ 1169 } 1170 1171 /* Update the duration field itself */ 1172 *(u_int16_t *)wh->i_dur = htole16(dur); 1173 } 1174 } 1175 1176 static uint8_t 1177 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1178 int cix, int shortPreamble) 1179 { 1180 uint8_t ctsrate; 1181 1182 /* 1183 * CTS transmit rate is derived from the transmit rate 1184 * by looking in the h/w rate table. We must also factor 1185 * in whether or not a short preamble is to be used. 1186 */ 1187 /* NB: cix is set above where RTS/CTS is enabled */ 1188 KASSERT(cix != 0xff, ("cix not setup")); 1189 ctsrate = rt->info[cix].rateCode; 1190 1191 /* XXX this should only matter for legacy rates */ 1192 if (shortPreamble) 1193 ctsrate |= rt->info[cix].shortPreamble; 1194 1195 return (ctsrate); 1196 } 1197 1198 /* 1199 * Calculate the RTS/CTS duration for legacy frames. 1200 */ 1201 static int 1202 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1203 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1204 int flags) 1205 { 1206 int ctsduration = 0; 1207 1208 /* This mustn't be called for HT modes */ 1209 if (rt->info[cix].phy == IEEE80211_T_HT) { 1210 printf("%s: HT rate where it shouldn't be (0x%x)\n", 1211 __func__, rt->info[cix].rateCode); 1212 return (-1); 1213 } 1214 1215 /* 1216 * Compute the transmit duration based on the frame 1217 * size and the size of an ACK frame. We call into the 1218 * HAL to do the computation since it depends on the 1219 * characteristics of the actual PHY being used. 1220 * 1221 * NB: CTS is assumed the same size as an ACK so we can 1222 * use the precalculated ACK durations. 1223 */ 1224 if (shortPreamble) { 1225 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1226 ctsduration += rt->info[cix].spAckDuration; 1227 ctsduration += ath_hal_computetxtime(ah, 1228 rt, pktlen, rix, AH_TRUE, AH_TRUE); 1229 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1230 ctsduration += rt->info[rix].spAckDuration; 1231 } else { 1232 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1233 ctsduration += rt->info[cix].lpAckDuration; 1234 ctsduration += ath_hal_computetxtime(ah, 1235 rt, pktlen, rix, AH_FALSE, AH_TRUE); 1236 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1237 ctsduration += rt->info[rix].lpAckDuration; 1238 } 1239 1240 return (ctsduration); 1241 } 1242 1243 /* 1244 * Update the given ath_buf with updated rts/cts setup and duration 1245 * values. 
1246 * 1247 * To support rate lookups for each software retry, the rts/cts rate 1248 * and cts duration must be re-calculated. 1249 * 1250 * This function assumes the RTS/CTS flags have been set as needed; 1251 * mrr has been disabled; and the rate control lookup has been done. 1252 * 1253 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1254 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1255 */ 1256 static void 1257 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1258 { 1259 uint16_t ctsduration = 0; 1260 uint8_t ctsrate = 0; 1261 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1262 uint8_t cix = 0; 1263 const HAL_RATE_TABLE *rt = sc->sc_currates; 1264 1265 /* 1266 * No RTS/CTS enabled? Don't bother. 1267 */ 1268 if ((bf->bf_state.bfs_txflags & 1269 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1270 /* XXX is this really needed? */ 1271 bf->bf_state.bfs_ctsrate = 0; 1272 bf->bf_state.bfs_ctsduration = 0; 1273 return; 1274 } 1275 1276 /* 1277 * If protection is enabled, use the protection rix control 1278 * rate. Otherwise use the rate0 control rate. 1279 */ 1280 if (bf->bf_state.bfs_doprot) 1281 rix = sc->sc_protrix; 1282 else 1283 rix = bf->bf_state.bfs_rc[0].rix; 1284 1285 /* 1286 * If the raw path has hard-coded ctsrate0 to something, 1287 * use it. 1288 */ 1289 if (bf->bf_state.bfs_ctsrate0 != 0) 1290 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1291 else 1292 /* Control rate from above */ 1293 cix = rt->info[rix].controlRate; 1294 1295 /* Calculate the rtscts rate for the given cix */ 1296 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1297 bf->bf_state.bfs_shpream); 1298 1299 /* The 11n chipsets do ctsduration calculations for you */ 1300 if (! ath_tx_is_11n(sc)) 1301 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1302 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1303 rt, bf->bf_state.bfs_txflags); 1304 1305 /* Squirrel away in ath_buf */ 1306 bf->bf_state.bfs_ctsrate = ctsrate; 1307 bf->bf_state.bfs_ctsduration = ctsduration; 1308 1309 /* 1310 * Must disable multi-rate retry when using RTS/CTS. 1311 */ 1312 if (!sc->sc_mrrprot) { 1313 bf->bf_state.bfs_ismrr = 0; 1314 bf->bf_state.bfs_try0 = 1315 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1316 } 1317 } 1318 1319 /* 1320 * Setup the descriptor chain for a normal or fast-frame 1321 * frame. 1322 * 1323 * XXX TODO: extend to include the destination hardware QCU ID. 1324 * Make sure that is correct. Make sure that when being added 1325 * to the mcastq, the CABQ QCUID is set or things will get a bit 1326 * odd. 
1327 */ 1328 static void 1329 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1330 { 1331 struct ath_desc *ds = bf->bf_desc; 1332 struct ath_hal *ah = sc->sc_ah; 1333 1334 if (bf->bf_state.bfs_txrate0 == 0) 1335 DPRINTF(sc, ATH_DEBUG_XMIT, 1336 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1337 1338 ath_hal_setuptxdesc(ah, ds 1339 , bf->bf_state.bfs_pktlen /* packet length */ 1340 , bf->bf_state.bfs_hdrlen /* header length */ 1341 , bf->bf_state.bfs_atype /* Atheros packet type */ 1342 , bf->bf_state.bfs_txpower /* txpower */ 1343 , bf->bf_state.bfs_txrate0 1344 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1345 , bf->bf_state.bfs_keyix /* key cache index */ 1346 , bf->bf_state.bfs_txantenna /* antenna mode */ 1347 , bf->bf_state.bfs_txflags /* flags */ 1348 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1349 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1350 ); 1351 1352 /* 1353 * This will be overriden when the descriptor chain is written. 1354 */ 1355 bf->bf_lastds = ds; 1356 bf->bf_last = bf; 1357 1358 /* Set rate control and descriptor chain for this frame */ 1359 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1360 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1361 } 1362 1363 /* 1364 * Do a rate lookup. 1365 * 1366 * This performs a rate lookup for the given ath_buf only if it's required. 1367 * Non-data frames and raw frames don't require it. 1368 * 1369 * This populates the primary and MRR entries; MRR values are 1370 * then disabled later on if something requires it (eg RTS/CTS on 1371 * pre-11n chipsets. 1372 * 1373 * This needs to be done before the RTS/CTS fields are calculated 1374 * as they may depend upon the rate chosen. 1375 */ 1376 static void 1377 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1378 { 1379 uint8_t rate, rix; 1380 int try0; 1381 1382 if (! bf->bf_state.bfs_doratelookup) 1383 return; 1384 1385 /* Get rid of any previous state */ 1386 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1387 1388 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1389 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1390 bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1391 1392 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1393 bf->bf_state.bfs_rc[0].rix = rix; 1394 bf->bf_state.bfs_rc[0].ratecode = rate; 1395 bf->bf_state.bfs_rc[0].tries = try0; 1396 1397 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1398 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1399 bf->bf_state.bfs_rc); 1400 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1401 1402 sc->sc_txrix = rix; /* for LED blinking */ 1403 sc->sc_lastdatarix = rix; /* for fast frames */ 1404 bf->bf_state.bfs_try0 = try0; 1405 bf->bf_state.bfs_txrate0 = rate; 1406 } 1407 1408 /* 1409 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1410 */ 1411 static void 1412 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1413 struct ath_buf *bf) 1414 { 1415 struct ath_node *an = ATH_NODE(bf->bf_node); 1416 1417 ATH_TX_LOCK_ASSERT(sc); 1418 1419 if (an->clrdmask == 1) { 1420 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1421 an->clrdmask = 0; 1422 } 1423 } 1424 1425 /* 1426 * Return whether this frame should be software queued or 1427 * direct dispatched. 1428 * 1429 * When doing powersave, BAR frames should be queued but other management 1430 * frames should be directly sent. 1431 * 1432 * When not doing powersave, stick BAR frames into the hardware queue 1433 * so it goes out even though the queue is paused. 
1434 * 1435 * For now, management frames are also software queued by default. 1436 */ 1437 static int 1438 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, 1439 struct mbuf *m0, int *queue_to_head) 1440 { 1441 struct ieee80211_node *ni = &an->an_node; 1442 struct ieee80211_frame *wh; 1443 uint8_t type, subtype; 1444 1445 wh = mtod(m0, struct ieee80211_frame *); 1446 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1447 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1448 1449 (*queue_to_head) = 0; 1450 1451 /* If it's not in powersave - direct-dispatch BAR */ 1452 if ((ATH_NODE(ni)->an_is_powersave == 0) 1453 && type == IEEE80211_FC0_TYPE_CTL && 1454 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1455 DPRINTF(sc, ATH_DEBUG_SW_TX, 1456 "%s: BAR: TX'ing direct\n", __func__); 1457 return (0); 1458 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1459 && type == IEEE80211_FC0_TYPE_CTL && 1460 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1461 /* BAR TX whilst asleep; queue */ 1462 DPRINTF(sc, ATH_DEBUG_SW_TX, 1463 "%s: swq: TX'ing\n", __func__); 1464 (*queue_to_head) = 1; 1465 return (1); 1466 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1467 && (type == IEEE80211_FC0_TYPE_MGT || 1468 type == IEEE80211_FC0_TYPE_CTL)) { 1469 /* 1470 * Other control/mgmt frame; bypass software queuing 1471 * for now! 1472 */ 1473 DPRINTF(sc, ATH_DEBUG_XMIT, 1474 "%s: %6D: Node is asleep; sending mgmt " 1475 "(type=%d, subtype=%d)\n", 1476 __func__, ni->ni_macaddr, ":", type, subtype); 1477 return (0); 1478 } else { 1479 return (1); 1480 } 1481 } 1482 1483 1484 /* 1485 * Transmit the given frame to the hardware. 1486 * 1487 * The frame must already be setup; rate control must already have 1488 * been done. 1489 * 1490 * XXX since the TXQ lock is being held here (and I dislike holding 1491 * it for this long when not doing software aggregation), later on 1492 * break this function into "setup_normal" and "xmit_normal". The 1493 * lock only needs to be held for the ath_tx_handoff call. 1494 * 1495 * XXX we don't update the leak count here - if we're doing 1496 * direct frame dispatch, we need to be able to do it without 1497 * decrementing the leak count (eg multicast queue frames.) 1498 */ 1499 static void 1500 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1501 struct ath_buf *bf) 1502 { 1503 struct ath_node *an = ATH_NODE(bf->bf_node); 1504 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1505 1506 ATH_TX_LOCK_ASSERT(sc); 1507 1508 /* 1509 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1510 * set a completion handler however it doesn't (yet) properly 1511 * handle the strict ordering requirements needed for normal, 1512 * non-aggregate session frames. 1513 * 1514 * Once this is implemented, only set CLRDMASK like this for 1515 * frames that must go out - eg management/raw frames. 1516 */ 1517 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1518 1519 /* Setup the descriptor before handoff */ 1520 ath_tx_do_ratelookup(sc, bf); 1521 ath_tx_calc_duration(sc, bf); 1522 ath_tx_calc_protection(sc, bf); 1523 ath_tx_set_rtscts(sc, bf); 1524 ath_tx_rate_fill_rcflags(sc, bf); 1525 ath_tx_setds(sc, bf); 1526 1527 /* Track per-TID hardware queue depth correctly */ 1528 tid->hwq_depth++; 1529 1530 /* Assign the completion handler */ 1531 bf->bf_comp = ath_tx_normal_comp; 1532 1533 /* Hand off to hardware */ 1534 ath_tx_handoff(sc, txq, bf); 1535 } 1536 1537 /* 1538 * Do the basic frame setup stuff that's required before the frame 1539 * is added to a software queue. 
1540 * 1541 * All frames get mostly the same treatment and it's done once. 1542 * Retransmits fiddle with things like the rate control setup, 1543 * setting the retransmit bit in the packet; doing relevant DMA/bus 1544 * syncing and relinking it (back) into the hardware TX queue. 1545 * 1546 * Note that this may cause the mbuf to be reallocated, so 1547 * m0 may not be valid. 1548 */ 1549 static int 1550 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1551 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1552 { 1553 struct ieee80211vap *vap = ni->ni_vap; 1554 struct ieee80211com *ic = &sc->sc_ic; 1555 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1556 int error, iswep, ismcast, isfrag, ismrr; 1557 int keyix, hdrlen, pktlen, try0 = 0; 1558 u_int8_t rix = 0, txrate = 0; 1559 struct ath_desc *ds; 1560 struct ieee80211_frame *wh; 1561 u_int subtype, flags; 1562 HAL_PKT_TYPE atype; 1563 const HAL_RATE_TABLE *rt; 1564 HAL_BOOL shortPreamble; 1565 struct ath_node *an; 1566 1567 /* XXX TODO: this pri is only used for non-QoS check, right? */ 1568 u_int pri; 1569 1570 /* 1571 * To ensure that both sequence numbers and the CCMP PN handling 1572 * is "correct", make sure that the relevant TID queue is locked. 1573 * Otherwise the CCMP PN and seqno may appear out of order, causing 1574 * re-ordered frames to have out of order CCMP PN's, resulting 1575 * in many, many frame drops. 1576 */ 1577 ATH_TX_LOCK_ASSERT(sc); 1578 1579 wh = mtod(m0, struct ieee80211_frame *); 1580 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 1581 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1582 isfrag = m0->m_flags & M_FRAG; 1583 hdrlen = ieee80211_anyhdrsize(wh); 1584 /* 1585 * Packet length must not include any 1586 * pad bytes; deduct them here. 1587 */ 1588 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1589 1590 /* Handle encryption twiddling if needed */ 1591 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1592 &pktlen, &keyix)) { 1593 ieee80211_free_mbuf(m0); 1594 return EIO; 1595 } 1596 1597 /* packet header may have moved, reset our local pointer */ 1598 wh = mtod(m0, struct ieee80211_frame *); 1599 1600 pktlen += IEEE80211_CRC_LEN; 1601 1602 /* 1603 * Load the DMA map so any coalescing is done. This 1604 * also calculates the number of descriptors we need. 1605 */ 1606 error = ath_tx_dmasetup(sc, bf, m0); 1607 if (error != 0) 1608 return error; 1609 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 1610 bf->bf_node = ni; /* NB: held reference */ 1611 m0 = bf->bf_m; /* NB: may have changed */ 1612 wh = mtod(m0, struct ieee80211_frame *); 1613 1614 /* setup descriptors */ 1615 ds = bf->bf_desc; 1616 rt = sc->sc_currates; 1617 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1618 1619 /* 1620 * NB: the 802.11 layer marks whether or not we should 1621 * use short preamble based on the current mode and 1622 * negotiated parameters. 
1623 */ 1624 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1625 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1626 shortPreamble = AH_TRUE; 1627 sc->sc_stats.ast_tx_shortpre++; 1628 } else { 1629 shortPreamble = AH_FALSE; 1630 } 1631 1632 an = ATH_NODE(ni); 1633 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1634 flags = 0; 1635 ismrr = 0; /* default no multi-rate retry*/ 1636 1637 pri = ath_tx_getac(sc, m0); /* honor classification */ 1638 /* XXX use txparams instead of fixed values */ 1639 /* 1640 * Calculate Atheros packet type from IEEE80211 packet header, 1641 * setup for rate calculations, and select h/w transmit queue. 1642 */ 1643 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1644 case IEEE80211_FC0_TYPE_MGT: 1645 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1646 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1647 atype = HAL_PKT_TYPE_BEACON; 1648 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1649 atype = HAL_PKT_TYPE_PROBE_RESP; 1650 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1651 atype = HAL_PKT_TYPE_ATIM; 1652 else 1653 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1654 rix = an->an_mgmtrix; 1655 txrate = rt->info[rix].rateCode; 1656 if (shortPreamble) 1657 txrate |= rt->info[rix].shortPreamble; 1658 try0 = ATH_TXMGTTRY; 1659 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1660 break; 1661 case IEEE80211_FC0_TYPE_CTL: 1662 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1663 rix = an->an_mgmtrix; 1664 txrate = rt->info[rix].rateCode; 1665 if (shortPreamble) 1666 txrate |= rt->info[rix].shortPreamble; 1667 try0 = ATH_TXMGTTRY; 1668 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1669 break; 1670 case IEEE80211_FC0_TYPE_DATA: 1671 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1672 /* 1673 * Data frames: multicast frames go out at a fixed rate, 1674 * EAPOL frames use the mgmt frame rate; otherwise consult 1675 * the rate control module for the rate to use. 1676 */ 1677 if (ismcast) { 1678 rix = an->an_mcastrix; 1679 txrate = rt->info[rix].rateCode; 1680 if (shortPreamble) 1681 txrate |= rt->info[rix].shortPreamble; 1682 try0 = 1; 1683 } else if (m0->m_flags & M_EAPOL) { 1684 /* XXX? maybe always use long preamble? */ 1685 rix = an->an_mgmtrix; 1686 txrate = rt->info[rix].rateCode; 1687 if (shortPreamble) 1688 txrate |= rt->info[rix].shortPreamble; 1689 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1690 } else { 1691 /* 1692 * Do rate lookup on each TX, rather than using 1693 * the hard-coded TX information decided here. 1694 */ 1695 ismrr = 1; 1696 bf->bf_state.bfs_doratelookup = 1; 1697 } 1698 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1699 flags |= HAL_TXDESC_NOACK; 1700 break; 1701 default: 1702 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", 1703 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1704 /* XXX statistic */ 1705 /* XXX free tx dmamap */ 1706 ieee80211_free_mbuf(m0); 1707 return EIO; 1708 } 1709 1710 /* 1711 * There are two known scenarios where the frame AC doesn't match 1712 * what the destination TXQ is. 1713 * 1714 * + non-QoS frames (eg management?) that the net80211 stack has 1715 * assigned a higher AC to, but since it's a non-QoS TID, it's 1716 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1717 * It's quite possible that management frames should just be 1718 * direct dispatched to hardware rather than go via the software 1719 * queue; that should be investigated in the future. 
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ieee80211_free_mbuf(m0);
		return EIO;
	}
#endif

	/*
	 * If it's a frame to do location reporting on,
	 * communicate it to the HAL.
	 */
	if (ieee80211_get_toa_params(m0, NULL)) {
		device_printf(sc->sc_dev,
		    "%s: setting TX positioning bit\n", __func__);
		flags |= HAL_TXDESC_POS;

		/*
		 * Note: The hardware reports timestamps for
		 * each of the RX'ed packets as part of the packet
		 * exchange.  So this means things like RTS/CTS
		 * exchanges, as well as the final ACK.
		 *
		 * So, if you send a RTS-protected NULL data frame,
		 * you'll get an RX report for the RTS response, then
		 * an RX report for the NULL frame, and then the TX
		 * completion at the end.
		 *
		 * NOTE: it doesn't work right for CCK frames;
		 * there's no channel info data provided unless
		 * it's OFDM or HT.  Will have to dig into it.
		 */
		flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
		bf->bf_flags |= ATH_BUF_TOA_PROBE;
	}

#if 0
	/*
	 * Placeholder: if you want to transmit with the azimuth
	 * timestamp in the end of the payload, here's where you
	 * should set the TXDESC field.
	 */
	flags |= HAL_TXDESC_HWTS;
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively can cause senders to
	 * back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
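	 *
	 * (For example, with sc_txintrperiod == 5, roughly every fifth
	 * frame that doesn't already request an interrupt has
	 * HAL_TXDESC_INTREQ forced on by the check below.)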
1818 */ 1819 if (flags & HAL_TXDESC_INTREQ) { 1820 txq->axq_intrcnt = 0; 1821 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 1822 flags |= HAL_TXDESC_INTREQ; 1823 txq->axq_intrcnt = 0; 1824 } 1825 1826 /* This point forward is actual TX bits */ 1827 1828 /* 1829 * At this point we are committed to sending the frame 1830 * and we don't need to look at m_nextpkt; clear it in 1831 * case this frame is part of frag chain. 1832 */ 1833 m0->m_nextpkt = NULL; 1834 1835 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1836 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1837 sc->sc_hwmap[rix].ieeerate, -1); 1838 1839 if (ieee80211_radiotap_active_vap(vap)) { 1840 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1841 if (iswep) 1842 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1843 if (isfrag) 1844 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1845 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1846 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); 1847 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1848 1849 ieee80211_radiotap_tx(vap, m0); 1850 } 1851 1852 /* Blank the legacy rate array */ 1853 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1854 1855 /* 1856 * ath_buf_set_rate needs at least one rate/try to setup 1857 * the rate scenario. 1858 */ 1859 bf->bf_state.bfs_rc[0].rix = rix; 1860 bf->bf_state.bfs_rc[0].tries = try0; 1861 bf->bf_state.bfs_rc[0].ratecode = txrate; 1862 1863 /* Store the decided rate index values away */ 1864 bf->bf_state.bfs_pktlen = pktlen; 1865 bf->bf_state.bfs_hdrlen = hdrlen; 1866 bf->bf_state.bfs_atype = atype; 1867 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1868 bf->bf_state.bfs_txrate0 = txrate; 1869 bf->bf_state.bfs_try0 = try0; 1870 bf->bf_state.bfs_keyix = keyix; 1871 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1872 bf->bf_state.bfs_txflags = flags; 1873 bf->bf_state.bfs_shpream = shortPreamble; 1874 1875 /* XXX this should be done in ath_tx_setrate() */ 1876 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1877 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1878 bf->bf_state.bfs_ctsduration = 0; 1879 bf->bf_state.bfs_ismrr = ismrr; 1880 1881 return 0; 1882 } 1883 1884 /* 1885 * Queue a frame to the hardware or software queue. 1886 * 1887 * This can be called by the net80211 code. 1888 * 1889 * XXX what about locking? Or, push the seqno assign into the 1890 * XXX aggregate scheduler so its serialised? 1891 * 1892 * XXX When sending management frames via ath_raw_xmit(), 1893 * should CLRDMASK be set unconditionally? 1894 */ 1895 int 1896 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1897 struct ath_buf *bf, struct mbuf *m0) 1898 { 1899 struct ieee80211vap *vap = ni->ni_vap; 1900 struct ath_vap *avp = ATH_VAP(vap); 1901 int r = 0; 1902 u_int pri; 1903 int tid; 1904 struct ath_txq *txq; 1905 int ismcast; 1906 const struct ieee80211_frame *wh; 1907 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1908 ieee80211_seq seqno; 1909 uint8_t type, subtype; 1910 int queue_to_head; 1911 1912 ATH_TX_LOCK_ASSERT(sc); 1913 1914 /* 1915 * Determine the target hardware queue. 1916 * 1917 * For multicast frames, the txq gets overridden appropriately 1918 * depending upon the state of PS. If powersave is enabled 1919 * then they get added to the cabq for later transmit. 1920 * 1921 * The "fun" issue here is that group addressed frames should 1922 * have the sequence number from a different pool, rather than 1923 * the per-TID pool. 
That means that even QoS group addressed 1924 * frames will have a sequence number from that global value, 1925 * which means if we transmit different group addressed frames 1926 * at different traffic priorities, the sequence numbers will 1927 * all be out of whack. So - chances are, the right thing 1928 * to do here is to always put group addressed frames into the BE 1929 * queue, and ignore the TID for queue selection. 1930 * 1931 * For any other frame, we do a TID/QoS lookup inside the frame 1932 * to see what the TID should be. If it's a non-QoS frame, the 1933 * AC and TID are overridden. The TID/TXQ code assumes the 1934 * TID is on a predictable hardware TXQ, so we don't support 1935 * having a node TID queued to multiple hardware TXQs. 1936 * This may change in the future but would require some locking 1937 * fudgery. 1938 */ 1939 pri = ath_tx_getac(sc, m0); 1940 tid = ath_tx_gettid(sc, m0); 1941 1942 txq = sc->sc_ac2q[pri]; 1943 wh = mtod(m0, struct ieee80211_frame *); 1944 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1945 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1946 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1947 1948 /* 1949 * Enforce how deep the multicast queue can grow. 1950 * 1951 * XXX duplicated in ath_raw_xmit(). 1952 */ 1953 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1954 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1955 > sc->sc_txq_mcastq_maxdepth) { 1956 sc->sc_stats.ast_tx_mcastq_overflow++; 1957 m_freem(m0); 1958 return (ENOBUFS); 1959 } 1960 } 1961 1962 /* 1963 * Enforce how deep the unicast queue can grow. 1964 * 1965 * If the node is in power save then we don't want 1966 * the software queue to grow too deep, or a node may 1967 * end up consuming all of the ath_buf entries. 1968 * 1969 * For now, only do this for DATA frames. 1970 * 1971 * We will want to cap how many management/control 1972 * frames get punted to the software queue so it doesn't 1973 * fill up. But the correct solution isn't yet obvious. 1974 * In any case, this check should at least let frames pass 1975 * that we are direct-dispatching. 1976 * 1977 * XXX TODO: duplicate this to the raw xmit path! 1978 */ 1979 if (type == IEEE80211_FC0_TYPE_DATA && 1980 ATH_NODE(ni)->an_is_powersave && 1981 ATH_NODE(ni)->an_swq_depth > 1982 sc->sc_txq_node_psq_maxdepth) { 1983 sc->sc_stats.ast_tx_node_psq_overflow++; 1984 m_freem(m0); 1985 return (ENOBUFS); 1986 } 1987 1988 /* A-MPDU TX */ 1989 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1990 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1991 is_ampdu = is_ampdu_tx | is_ampdu_pending; 1992 1993 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 1994 __func__, tid, pri, is_ampdu); 1995 1996 /* Set local packet state, used to queue packets to hardware */ 1997 bf->bf_state.bfs_tid = tid; 1998 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 1999 bf->bf_state.bfs_pri = pri; 2000 2001 #if 1 2002 /* 2003 * When servicing one or more stations in power-save mode, 2004 * or if there is some mcast data waiting on the mcast 2005 * queue (to prevent out of order delivery), multicast frames 2006 * must be buffered until after the beacon. 2007 * 2008 * TODO: we should lock the mcastq before we check the length. 2009 */ 2010 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 2011 txq = &avp->av_mcastq; 2012 /* 2013 * Mark the frame as eventually belonging on the CAB 2014 * queue, so the descriptor setup functions will 2015 * correctly initialise the descriptor 'qcuId' field.
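 *
 * (The CAB queue is the "content after beacon" queue: frames
 * placed on it are burst out immediately after the beacon that
 * advertises them in the TIM, which is how dozing stations are
 * kept from missing group addressed traffic.)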
2016 */ 2017 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; 2018 } 2019 #endif 2020 2021 /* Do the generic frame setup */ 2022 /* XXX should just bzero the bf_state? */ 2023 bf->bf_state.bfs_dobaw = 0; 2024 2025 /* A-MPDU TX? Manually set sequence number */ 2026 /* 2027 * Don't do it whilst pending; the net80211 layer still 2028 * assigns them. 2029 * 2030 * Don't assign A-MPDU sequence numbers to group address 2031 * frames; they come from a different sequence number space. 2032 */ 2033 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) { 2034 /* 2035 * Always call; this function will 2036 * handle making sure that null data frames 2037 * and group-addressed frames don't get a sequence number 2038 * from the current TID and thus mess with the BAW. 2039 */ 2040 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 2041 2042 /* 2043 * Don't add QoS NULL frames and group-addressed frames 2044 * to the BAW. 2045 */ 2046 if (IEEE80211_QOS_HAS_SEQ(wh) && 2047 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) && 2048 (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) { 2049 bf->bf_state.bfs_dobaw = 1; 2050 } 2051 } 2052 2053 /* 2054 * If needed, the sequence number has been assigned. 2055 * Squirrel it away somewhere easy to get to. 2056 */ 2057 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2058 2059 /* Is ampdu pending? fetch the seqno and print it out */ 2060 if (is_ampdu_pending) 2061 DPRINTF(sc, ATH_DEBUG_SW_TX, 2062 "%s: tid %d: ampdu pending, seqno %d\n", 2063 __func__, tid, M_SEQNO_GET(m0)); 2064 2065 /* This also sets up the DMA map; crypto; frame parameters, etc */ 2066 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2067 2068 if (r != 0) 2069 goto done; 2070 2071 /* At this point m0 could have changed! */ 2072 m0 = bf->bf_m; 2073 2074 #if 1 2075 /* 2076 * If it's a multicast frame, do a direct-dispatch to the 2077 * destination hardware queue. Don't bother software 2078 * queuing it. 2079 */ 2080 /* 2081 * If it's a BAR frame, do a direct dispatch to the 2082 * destination hardware queue. Don't bother software 2083 * queuing it, as the TID will now be paused. 2084 * Sending a BAR frame can occur from the net80211 txa timer 2085 * (ie, retries) or from the ath txtask (completion call.) 2086 * It queues directly to hardware because the TID is paused 2087 * at this point (and won't be unpaused until the BAR has 2088 * either been TXed successfully or max retries has been 2089 * reached.) 2090 */ 2091 /* 2092 * Until things are better debugged - if this node is asleep 2093 * and we're sending it a non-BAR frame, direct dispatch it. 2094 * Why? Because we need to figure out what's actually being 2095 * sent - eg, during reassociation/reauthentication after 2096 * the node (last) disappeared whilst asleep, the driver should 2097 * have unpaused/unsleep'ed the node. So until that is 2098 * sorted out, use this workaround. 2099 */ 2100 if (txq == &avp->av_mcastq) { 2101 DPRINTF(sc, ATH_DEBUG_SW_TX, 2102 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2103 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2104 ath_tx_xmit_normal(sc, txq, bf); 2105 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2106 &queue_to_head)) { 2107 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2108 } else { 2109 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2110 ath_tx_xmit_normal(sc, txq, bf); 2111 } 2112 #else 2113 /* 2114 * For now, since there's no software queue, 2115 * direct-dispatch to the hardware. 
2116 */ 2117 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2118 /* 2119 * Update the current leak count if 2120 * we're leaking frames; and set the 2121 * MORE flag as appropriate. 2122 */ 2123 ath_tx_leak_count_update(sc, tid, bf); 2124 ath_tx_xmit_normal(sc, txq, bf); 2125 #endif 2126 done: 2127 return 0; 2128 } 2129 2130 static int 2131 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2132 struct ath_buf *bf, struct mbuf *m0, 2133 const struct ieee80211_bpf_params *params) 2134 { 2135 struct ieee80211com *ic = &sc->sc_ic; 2136 struct ieee80211vap *vap = ni->ni_vap; 2137 int error, ismcast, ismrr; 2138 int keyix, hdrlen, pktlen, try0, txantenna; 2139 u_int8_t rix, txrate; 2140 struct ieee80211_frame *wh; 2141 u_int flags; 2142 HAL_PKT_TYPE atype; 2143 const HAL_RATE_TABLE *rt; 2144 struct ath_desc *ds; 2145 u_int pri; 2146 int o_tid = -1; 2147 int do_override; 2148 uint8_t type, subtype; 2149 int queue_to_head; 2150 struct ath_node *an = ATH_NODE(ni); 2151 2152 ATH_TX_LOCK_ASSERT(sc); 2153 2154 wh = mtod(m0, struct ieee80211_frame *); 2155 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2156 hdrlen = ieee80211_anyhdrsize(wh); 2157 /* 2158 * Packet length must not include any 2159 * pad bytes; deduct them here. 2160 */ 2161 /* XXX honor IEEE80211_BPF_DATAPAD */ 2162 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2163 2164 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2165 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2166 2167 ATH_KTR(sc, ATH_KTR_TX, 2, 2168 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2169 2170 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2171 __func__, ismcast); 2172 2173 pri = params->ibp_pri & 3; 2174 /* Override pri if the frame isn't a QoS one */ 2175 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2176 pri = ath_tx_getac(sc, m0); 2177 2178 /* XXX If it's an ADDBA, override the correct queue */ 2179 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2180 2181 /* Map ADDBA to the correct priority */ 2182 if (do_override) { 2183 #if 1 2184 DPRINTF(sc, ATH_DEBUG_XMIT, 2185 "%s: overriding tid %d pri %d -> %d\n", 2186 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2187 #endif 2188 pri = TID_TO_WME_AC(o_tid); 2189 } 2190 2191 /* 2192 * "pri" is the hardware queue to transmit on. 2193 * 2194 * Look at the description in ath_tx_start() to understand 2195 * what needs to be "fixed" here so we just use the TID 2196 * for QoS frames. 2197 */ 2198 2199 /* Handle encryption twiddling if needed */ 2200 if (! ath_tx_tag_crypto(sc, ni, 2201 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2202 &hdrlen, &pktlen, &keyix)) { 2203 ieee80211_free_mbuf(m0); 2204 return EIO; 2205 } 2206 /* packet header may have moved, reset our local pointer */ 2207 wh = mtod(m0, struct ieee80211_frame *); 2208 2209 /* Do the generic frame setup */ 2210 /* XXX should just bzero the bf_state? */ 2211 bf->bf_state.bfs_dobaw = 0; 2212 2213 error = ath_tx_dmasetup(sc, bf, m0); 2214 if (error != 0) 2215 return error; 2216 m0 = bf->bf_m; /* NB: may have changed */ 2217 wh = mtod(m0, struct ieee80211_frame *); 2218 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2219 bf->bf_node = ni; /* NB: held reference */ 2220 2221 /* Always enable CLRDMASK for raw frames for now.. 
*/ 2222 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2223 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2224 if (params->ibp_flags & IEEE80211_BPF_RTS) 2225 flags |= HAL_TXDESC_RTSENA; 2226 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2227 /* XXX assume 11g/11n protection? */ 2228 bf->bf_state.bfs_doprot = 1; 2229 flags |= HAL_TXDESC_CTSENA; 2230 } 2231 /* XXX leave ismcast to injector? */ 2232 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2233 flags |= HAL_TXDESC_NOACK; 2234 2235 rt = sc->sc_currates; 2236 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2237 2238 /* Fetch first rate information */ 2239 rix = ath_tx_findrix(sc, params->ibp_rate0); 2240 try0 = params->ibp_try0; 2241 2242 /* 2243 * Override EAPOL rate as appropriate. 2244 */ 2245 if (m0->m_flags & M_EAPOL) { 2246 /* XXX? maybe always use long preamble? */ 2247 rix = an->an_mgmtrix; 2248 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2249 } 2250 2251 /* 2252 * If it's a frame to do location reporting on, 2253 * communicate it to the HAL. 2254 */ 2255 if (ieee80211_get_toa_params(m0, NULL)) { 2256 device_printf(sc->sc_dev, 2257 "%s: setting TX positioning bit\n", __func__); 2258 flags |= HAL_TXDESC_POS; 2259 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); 2260 bf->bf_flags |= ATH_BUF_TOA_PROBE; 2261 } 2262 2263 txrate = rt->info[rix].rateCode; 2264 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2265 txrate |= rt->info[rix].shortPreamble; 2266 sc->sc_txrix = rix; 2267 ismrr = (params->ibp_try1 != 0); 2268 txantenna = params->ibp_pri >> 2; 2269 if (txantenna == 0) /* XXX? */ 2270 txantenna = sc->sc_txantenna; 2271 2272 /* 2273 * Since ctsrate is fixed, store it away for later 2274 * use when the descriptor fields are being set. 2275 */ 2276 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2277 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2278 2279 /* 2280 * NB: we mark all packets as type PSPOLL so the h/w won't 2281 * set the sequence number, duration, etc. 2282 */ 2283 atype = HAL_PKT_TYPE_PSPOLL; 2284 2285 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2286 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2287 sc->sc_hwmap[rix].ieeerate, -1); 2288 2289 if (ieee80211_radiotap_active_vap(vap)) { 2290 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2291 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2292 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2293 if (m0->m_flags & M_FRAG) 2294 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2295 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2296 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2297 ieee80211_get_node_txpower(ni)); 2298 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2299 2300 ieee80211_radiotap_tx(vap, m0); 2301 } 2302 2303 /* 2304 * Formulate first tx descriptor with tx controls. 2305 */ 2306 ds = bf->bf_desc; 2307 /* XXX check return value? */ 2308 2309 /* Store the decided rate index values away */ 2310 bf->bf_state.bfs_pktlen = pktlen; 2311 bf->bf_state.bfs_hdrlen = hdrlen; 2312 bf->bf_state.bfs_atype = atype; 2313 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2314 ieee80211_get_node_txpower(ni)); 2315 bf->bf_state.bfs_txrate0 = txrate; 2316 bf->bf_state.bfs_try0 = try0; 2317 bf->bf_state.bfs_keyix = keyix; 2318 bf->bf_state.bfs_txantenna = txantenna; 2319 bf->bf_state.bfs_txflags = flags; 2320 bf->bf_state.bfs_shpream = 2321 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2322 2323 /* Set local packet state, used to queue packets to hardware */ 2324 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2325 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2326 bf->bf_state.bfs_pri = pri; 2327 2328 /* XXX this should be done in ath_tx_setrate() */ 2329 bf->bf_state.bfs_ctsrate = 0; 2330 bf->bf_state.bfs_ctsduration = 0; 2331 bf->bf_state.bfs_ismrr = ismrr; 2332 2333 /* Blank the legacy rate array */ 2334 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2335 2336 bf->bf_state.bfs_rc[0].rix = rix; 2337 bf->bf_state.bfs_rc[0].tries = try0; 2338 bf->bf_state.bfs_rc[0].ratecode = txrate; 2339 2340 if (ismrr) { 2341 int rix; 2342 2343 rix = ath_tx_findrix(sc, params->ibp_rate1); 2344 bf->bf_state.bfs_rc[1].rix = rix; 2345 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2346 2347 rix = ath_tx_findrix(sc, params->ibp_rate2); 2348 bf->bf_state.bfs_rc[2].rix = rix; 2349 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2350 2351 rix = ath_tx_findrix(sc, params->ibp_rate3); 2352 bf->bf_state.bfs_rc[3].rix = rix; 2353 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2354 } 2355 /* 2356 * All the required rate control decisions have been made; 2357 * fill in the rc flags. 2358 */ 2359 ath_tx_rate_fill_rcflags(sc, bf); 2360 2361 /* NB: no buffered multicast in power save support */ 2362 2363 /* 2364 * If we're overriding the ADDBA destination, dump directly 2365 * into the hardware queue, right after any pending 2366 * frames to that node. 2367 */ 2368 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2369 __func__, do_override); 2370 2371 #if 1 2372 /* 2373 * Put ADDBA frames in the right place in the right TID/HWQ. 2374 */ 2375 if (do_override) { 2376 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2377 /* 2378 * XXX if these are ADDBA frames, should we be leaking 2379 * them out via the frame leak method? 2380 * XXX for now let's not risk it; but we may wish 2381 * to investigate this later. 2382 */ 2383 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2384 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2385 &queue_to_head)) { 2386 /* Queue to software queue */ 2387 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2388 } else { 2389 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2390 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2391 } 2392 #else 2393 /* Direct-dispatch to the hardware */ 2394 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2395 /* 2396 * Update the current leak count if 2397 * we're leaking frames; and set the 2398 * MORE flag as appropriate. 2399 */ 2400 ath_tx_leak_count_update(sc, tid, bf); 2401 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2402 #endif 2403 return 0; 2404 } 2405 2406 /* 2407 * Send a raw frame. 2408 * 2409 * This can be called by net80211.
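 *
 * A minimal sketch of the two call paths into here, with
 * illustrative parameter values (nothing below is mandated by
 * this driver; the ic_raw_xmit wiring is assumed to be the
 * standard net80211 hook this function is attached to):
 *
 *	struct ieee80211_bpf_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.ibp_rate0 = 2 * 2;		// 2 Mb/s, in 500 kb/s units
 *	p.ibp_try0 = 4;			// four tries at rate0
 *	p.ibp_flags = IEEE80211_BPF_NOACK;
 *	ic->ic_raw_xmit(ni, m, &p);	// explicit-parameters path
 *	ic->ic_raw_xmit(ni, m, NULL);	// legacy path; frame contents decide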
2410 */ 2411 int 2412 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2413 const struct ieee80211_bpf_params *params) 2414 { 2415 struct ieee80211com *ic = ni->ni_ic; 2416 struct ath_softc *sc = ic->ic_softc; 2417 struct ath_buf *bf; 2418 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2419 int error = 0; 2420 2421 ATH_PCU_LOCK(sc); 2422 if (sc->sc_inreset_cnt > 0) { 2423 DPRINTF(sc, ATH_DEBUG_XMIT, 2424 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2425 error = EIO; 2426 ATH_PCU_UNLOCK(sc); 2427 goto badbad; 2428 } 2429 sc->sc_txstart_cnt++; 2430 ATH_PCU_UNLOCK(sc); 2431 2432 /* Wake the hardware up already */ 2433 ATH_LOCK(sc); 2434 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2435 ATH_UNLOCK(sc); 2436 2437 ATH_TX_LOCK(sc); 2438 2439 if (!sc->sc_running || sc->sc_invalid) { 2440 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", 2441 __func__, sc->sc_running, sc->sc_invalid); 2442 m_freem(m); 2443 error = ENETDOWN; 2444 goto bad; 2445 } 2446 2447 /* 2448 * Enforce how deep the multicast queue can grow. 2449 * 2450 * XXX duplicated in ath_tx_start(). 2451 */ 2452 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2453 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2454 > sc->sc_txq_mcastq_maxdepth) { 2455 sc->sc_stats.ast_tx_mcastq_overflow++; 2456 error = ENOBUFS; 2457 } 2458 2459 if (error != 0) { 2460 m_freem(m); 2461 goto bad; 2462 } 2463 } 2464 2465 /* 2466 * Grab a TX buffer and associated resources. 2467 */ 2468 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2469 if (bf == NULL) { 2470 sc->sc_stats.ast_tx_nobuf++; 2471 m_freem(m); 2472 error = ENOBUFS; 2473 goto bad; 2474 } 2475 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2476 m, params, bf); 2477 2478 if (params == NULL) { 2479 /* 2480 * Legacy path; interpret frame contents to decide 2481 * precisely how to send the frame. 2482 */ 2483 if (ath_tx_start(sc, ni, bf, m)) { 2484 error = EIO; /* XXX */ 2485 goto bad2; 2486 } 2487 } else { 2488 /* 2489 * Caller supplied explicit parameters to use in 2490 * sending the frame. 2491 */ 2492 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2493 error = EIO; /* XXX */ 2494 goto bad2; 2495 } 2496 } 2497 sc->sc_wd_timer = 5; 2498 sc->sc_stats.ast_tx_raw++; 2499 2500 /* 2501 * Update the TIM - if there's anything queued to the 2502 * software queue and power save is enabled, we should 2503 * set the TIM. 2504 */ 2505 ath_tx_update_tim(sc, ni, 1); 2506 2507 ATH_TX_UNLOCK(sc); 2508 2509 ATH_PCU_LOCK(sc); 2510 sc->sc_txstart_cnt--; 2511 ATH_PCU_UNLOCK(sc); 2512 2513 2514 /* Put the hardware back to sleep if required */ 2515 ATH_LOCK(sc); 2516 ath_power_restore_power_state(sc); 2517 ATH_UNLOCK(sc); 2518 2519 return 0; 2520 2521 bad2: 2522 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2523 "bf=%p", 2524 m, 2525 params, 2526 bf); 2527 ATH_TXBUF_LOCK(sc); 2528 ath_returnbuf_head(sc, bf); 2529 ATH_TXBUF_UNLOCK(sc); 2530 2531 bad: 2532 ATH_TX_UNLOCK(sc); 2533 2534 ATH_PCU_LOCK(sc); 2535 sc->sc_txstart_cnt--; 2536 ATH_PCU_UNLOCK(sc); 2537 2538 /* Put the hardware back to sleep if required */ 2539 ATH_LOCK(sc); 2540 ath_power_restore_power_state(sc); 2541 ATH_UNLOCK(sc); 2542 2543 badbad: 2544 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2545 m, params); 2546 sc->sc_stats.ast_tx_raw_fail++; 2547 2548 return error; 2549 } 2550 2551 /* Some helper functions */ 2552 2553 /* 2554 * ADDBA (and potentially others) need to be placed in the same 2555 * hardware queue as the TID/node it's relating to. 
This is so 2556 * it goes out after any pending non-aggregate frames to the 2557 * same node/TID. 2558 * 2559 * If this isn't done, the ADDBA can go out before the frames 2560 * queued in hardware. Even though these frames have a sequence 2561 * number -earlier- than the ADDBA can be transmitted (but 2562 * no frames whose sequence numbers are after the ADDBA should 2563 * be!) they'll arrive after the ADDBA - and the receiving end 2564 * will simply drop them as being out of the BAW. 2565 * 2566 * The frames can't be appended to the TID software queue - it'll 2567 * never be sent out. So these frames have to be directly 2568 * dispatched to the hardware, rather than queued in software. 2569 * So if this function returns true, the TXQ has to be 2570 * overridden and it has to be directly dispatched. 2571 * 2572 * It's a dirty hack, but someone's gotta do it. 2573 */ 2574 2575 /* 2576 * XXX doesn't belong here! 2577 */ 2578 static int 2579 ieee80211_is_action(struct ieee80211_frame *wh) 2580 { 2581 /* Type: Management frame? */ 2582 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2583 IEEE80211_FC0_TYPE_MGT) 2584 return 0; 2585 2586 /* Subtype: Action frame? */ 2587 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2588 IEEE80211_FC0_SUBTYPE_ACTION) 2589 return 0; 2590 2591 return 1; 2592 } 2593 2594 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2595 /* 2596 * Return an alternate TID for ADDBA request frames. 2597 * 2598 * Yes, this likely should be done in the net80211 layer. 2599 */ 2600 static int 2601 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2602 struct ieee80211_node *ni, 2603 struct mbuf *m0, int *tid) 2604 { 2605 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2606 struct ieee80211_action_ba_addbarequest *ia; 2607 uint8_t *frm; 2608 uint16_t baparamset; 2609 2610 /* Not action frame? Bail */ 2611 if (! ieee80211_is_action(wh)) 2612 return 0; 2613 2614 /* XXX Not needed for frames we send? */ 2615 #if 0 2616 /* Correct length? */ 2617 if (! ieee80211_parse_action(ni, m)) 2618 return 0; 2619 #endif 2620 2621 /* Extract out action frame */ 2622 frm = (u_int8_t *)&wh[1]; 2623 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2624 2625 /* Not ADDBA? Bail */ 2626 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2627 return 0; 2628 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2629 return 0; 2630 2631 /* Extract TID, return it */ 2632 baparamset = le16toh(ia->rq_baparamset); 2633 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2634 2635 return 1; 2636 } 2637 #undef MS 2638 2639 /* Per-node software queue operations */ 2640 2641 /* 2642 * Add the current packet to the given BAW. 2643 * It is assumed that the current packet 2644 * 2645 * + fits inside the BAW; 2646 * + already has had a sequence number allocated. 2647 * 2648 * Since the BAW status may be modified by both the ath task and 2649 * the net80211/ifnet contexts, the TID must be locked. 2650 */ 2651 void 2652 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2653 struct ath_tid *tid, struct ath_buf *bf) 2654 { 2655 int index, cindex; 2656 struct ieee80211_tx_ampdu *tap; 2657 2658 ATH_TX_LOCK_ASSERT(sc); 2659 2660 if (bf->bf_state.bfs_isretried) 2661 return; 2662 2663 tap = ath_tx_get_tx_tid(an, tid->tid); 2664 2665 if (! 
bf->bf_state.bfs_dobaw) { 2666 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2667 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2668 __func__, SEQNO(bf->bf_state.bfs_seqno), 2669 tap->txa_start, tap->txa_wnd); 2670 } 2671 2672 if (bf->bf_state.bfs_addedbaw) 2673 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2674 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2675 "baw head=%d tail=%d\n", 2676 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2677 tap->txa_start, tap->txa_wnd, tid->baw_head, 2678 tid->baw_tail); 2679 2680 /* 2681 * Verify that the given sequence number is not outside of the 2682 * BAW. Complain loudly if that's the case. 2683 */ 2684 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2685 SEQNO(bf->bf_state.bfs_seqno))) { 2686 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2687 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2688 "baw head=%d tail=%d\n", 2689 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2690 tap->txa_start, tap->txa_wnd, tid->baw_head, 2691 tid->baw_tail); 2692 } 2693 2694 /* 2695 * ni->ni_txseqs[] is the currently allocated seqno. 2696 * the txa state contains the current baw start. 2697 */ 2698 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2699 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2700 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2701 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2702 "baw head=%d tail=%d\n", 2703 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2704 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2705 tid->baw_tail); 2706 2707 2708 #if 0 2709 assert(tid->tx_buf[cindex] == NULL); 2710 #endif 2711 if (tid->tx_buf[cindex] != NULL) { 2712 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2713 "%s: ba packet dup (index=%d, cindex=%d, " 2714 "head=%d, tail=%d)\n", 2715 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2716 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2717 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2718 __func__, 2719 tid->tx_buf[cindex], 2720 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2721 bf, 2722 SEQNO(bf->bf_state.bfs_seqno) 2723 ); 2724 } 2725 tid->tx_buf[cindex] = bf; 2726 2727 if (index >= ((tid->baw_tail - tid->baw_head) & 2728 (ATH_TID_MAX_BUFS - 1))) { 2729 tid->baw_tail = cindex; 2730 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2731 } 2732 } 2733 2734 /* 2735 * Flip the BAW buffer entry over from the existing one to the new one. 2736 * 2737 * When software retransmitting a (sub-)frame, it is entirely possible that 2738 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2739 * In that instance the buffer is cloned and the new buffer is used for 2740 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2741 * tracking array to maintain consistency. 2742 */ 2743 static void 2744 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2745 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2746 { 2747 int index, cindex; 2748 struct ieee80211_tx_ampdu *tap; 2749 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2750 2751 ATH_TX_LOCK_ASSERT(sc); 2752 2753 tap = ath_tx_get_tx_tid(an, tid->tid); 2754 index = ATH_BA_INDEX(tap->txa_start, seqno); 2755 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2756 2757 /* 2758 * Just warn for now; if it happens then we should find out 2759 * about it. It's highly likely the aggregation session will 2760 * soon hang. 
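 *
 * For example (illustrative values): with tap->txa_start = 4090
 * and a retried frame whose seqno is 5, ATH_BA_INDEX() yields
 * (5 - 4090) & 4095 = 11, so the frame is tracked 11 slots past
 * the BAW left edge even though the sequence space has wrapped.
 * If that slot no longer holds the old ath_buf, the tracking
 * array and the real BAW have diverged: ath_tx_update_baw()
 * below can never clear the right slot, txa_start stops sliding,
 * and the window eventually fills and wedges TX for the TID.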
2761 */ 2762 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2763 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2764 "%s: retransmitted buffer" 2765 " has mismatching seqnos, BA session may hang.\n", 2766 __func__); 2767 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2768 "%s: old seqno=%d, new_seqno=%d\n", __func__, 2769 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); 2770 } 2771 2772 if (tid->tx_buf[cindex] != old_bf) { 2773 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2774 "%s: ath_buf pointer incorrect; " 2775 "BA session may hang.\n", __func__); 2776 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2777 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); 2778 } 2779 2780 tid->tx_buf[cindex] = new_bf; 2781 } 2782 2783 /* 2784 * seq_start - left edge of BAW 2785 * seq_next - current/next sequence number to allocate 2786 * 2787 * Since the BAW status may be modified by both the ath task and 2788 * the net80211/ifnet contexts, the TID must be locked. 2789 */ 2790 static void 2791 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2792 struct ath_tid *tid, const struct ath_buf *bf) 2793 { 2794 int index, cindex; 2795 struct ieee80211_tx_ampdu *tap; 2796 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2797 2798 ATH_TX_LOCK_ASSERT(sc); 2799 2800 tap = ath_tx_get_tx_tid(an, tid->tid); 2801 index = ATH_BA_INDEX(tap->txa_start, seqno); 2802 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2803 2804 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2805 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2806 "baw head=%d, tail=%d\n", 2807 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2808 cindex, tid->baw_head, tid->baw_tail); 2809 2810 /* 2811 * If this occurs then we have a big problem - something else 2812 * has slid tap->txa_start along without updating the BAW 2813 * tracking start/end pointers. Thus the TX BAW state is now 2814 * completely busted. 2815 * 2816 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2817 * it's quite possible that a cloned buffer is making its way 2818 * here and causing it to fire off. Disable TDMA for now. 2819 */ 2820 if (tid->tx_buf[cindex] != bf) { 2821 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2822 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2823 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 2824 tid->tx_buf[cindex], 2825 (tid->tx_buf[cindex] != NULL) ? 2826 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); 2827 } 2828 2829 tid->tx_buf[cindex] = NULL; 2830 2831 while (tid->baw_head != tid->baw_tail && 2832 !tid->tx_buf[tid->baw_head]) { 2833 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2834 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2835 } 2836 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2837 "%s: tid=%d: baw is now %d:%d, baw head=%d\n", 2838 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); 2839 } 2840 2841 static void 2842 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, 2843 struct ath_buf *bf) 2844 { 2845 struct ieee80211_frame *wh; 2846 2847 ATH_TX_LOCK_ASSERT(sc); 2848 2849 if (tid->an->an_leak_count > 0) { 2850 wh = mtod(bf->bf_m, struct ieee80211_frame *); 2851 2852 /* 2853 * Update MORE based on the software/net80211 queue states.
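 * That is, keep IEEE80211_FC1_MORE_DATA set while either the
 * net80211 power-save queue (an_stack_psq) or our software queue
 * (an_swq_depth) still holds frames for this station, so a
 * PS-POLLing station knows to keep polling; clear it once nothing
 * else is pending so the station can doze again.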
2854 */ 2855 if ((tid->an->an_stack_psq > 0) 2856 || (tid->an->an_swq_depth > 0)) 2857 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 2858 else 2859 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; 2860 2861 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2862 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2863 __func__, 2864 tid->an->an_node.ni_macaddr, 2865 ":", 2866 tid->an->an_leak_count, 2867 tid->an->an_stack_psq, 2868 tid->an->an_swq_depth, 2869 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2870 2871 /* 2872 * Re-sync the underlying buffer. 2873 */ 2874 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2875 BUS_DMASYNC_PREWRITE); 2876 2877 tid->an->an_leak_count--; 2878 } 2879 } 2880 2881 static int 2882 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) 2883 { 2884 2885 ATH_TX_LOCK_ASSERT(sc); 2886 2887 if (tid->an->an_leak_count > 0) { 2888 return (1); 2889 } 2890 if (tid->paused) 2891 return (0); 2892 return (1); 2893 } 2894 2895 /* 2896 * Mark the current node/TID as ready to TX. 2897 * 2898 * This is done to make it easy for the software scheduler to 2899 * find which nodes have data to send. 2900 * 2901 * The TXQ lock must be held. 2902 */ 2903 void 2904 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2905 { 2906 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2907 2908 ATH_TX_LOCK_ASSERT(sc); 2909 2910 /* 2911 * If we are leaking out a frame to this destination 2912 * for PS-POLL, ensure that we allow scheduling to 2913 * occur. 2914 */ 2915 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 2916 return; /* paused, can't schedule yet */ 2917 2918 if (tid->sched) 2919 return; /* already scheduled */ 2920 2921 tid->sched = 1; 2922 2923 #if 0 2924 /* 2925 * If this is a sleeping node we're leaking to, give 2926 * it a higher priority. This is so bad for QoS it hurts. 2927 */ 2928 if (tid->an->an_leak_count) { 2929 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); 2930 } else { 2931 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2932 } 2933 #endif 2934 2935 /* 2936 * We can't do the above - it'll confuse the TXQ software 2937 * scheduler which will keep checking the _head_ TID 2938 * in the list to see if it has traffic. If we queue 2939 * a TID to the head of the list and it doesn't transmit, 2940 * we'll check it again. 2941 * 2942 * So, get the rest of this leaking-frames support working 2943 * reliably first and _then_ optimise it so leaked frames are 2944 * pushed out in front of any other pending software 2945 * queued nodes. 2946 */ 2947 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2948 } 2949 2950 /* 2951 * Mark the current node as no longer needing to be polled for 2952 * TX packets. 2953 * 2954 * The TXQ lock must be held. 2955 */ 2956 static void 2957 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2958 { 2959 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2960 2961 ATH_TX_LOCK_ASSERT(sc); 2962 2963 if (tid->sched == 0) 2964 return; 2965 2966 tid->sched = 0; 2967 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2968 } 2969 2970 /* 2971 * Assign a sequence number manually to the given frame. 2972 * 2973 * This should only be called for A-MPDU TX frames. 2974 * 2975 * Note: for group addressed frames, the sequence number 2976 * should be from NONQOS_TID, and net80211 should have 2977 * already assigned it for us.
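 *
 * As a concrete illustration of the two spaces (values invented
 * for the example): ni_txseqs[3] may sit at 100 while
 * ni_txseqs[IEEE80211_NONQOS_TID] sits at 900; a QoS data frame
 * on TID 3 is assigned 100, whilst a QoS NULL or group addressed
 * frame is assigned 900. INCR() advances each counter modulo
 * IEEE80211_SEQ_RANGE (4096), so seqno 4095 is followed by 0.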
2978 */ 2979 static ieee80211_seq 2980 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2981 struct ath_buf *bf, struct mbuf *m0) 2982 { 2983 struct ieee80211_frame *wh; 2984 int tid; 2985 ieee80211_seq seqno; 2986 uint8_t subtype; 2987 2988 wh = mtod(m0, struct ieee80211_frame *); 2989 tid = ieee80211_gettid(wh); 2990 2991 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n", 2992 __func__, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2993 2994 /* XXX Is it a control frame? Ignore */ 2995 2996 /* Does the packet require a sequence number? */ 2997 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2998 return -1; 2999 3000 ATH_TX_LOCK_ASSERT(sc); 3001 3002 /* 3003 * Is it a QOS NULL Data frame? Give it a sequence number from 3004 * the default TID (IEEE80211_NONQOS_TID.) 3005 * 3006 * The RX path of everything I've looked at doesn't include the NULL 3007 * data frame sequence number in the aggregation state updates, so 3008 * assigning it a sequence number there will cause a BAW hole on the 3009 * RX side. 3010 */ 3011 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3012 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 3013 /* XXX no locking for this TID? This is a bit of a problem. */ 3014 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3015 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3016 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3017 /* 3018 * group addressed frames get a sequence number from 3019 * a different sequence number space. 3020 */ 3021 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3022 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3023 } else { 3024 /* Manually assign sequence number */ 3025 seqno = ni->ni_txseqs[tid]; 3026 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 3027 } 3028 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 3029 M_SEQNO_SET(m0, seqno); 3030 3031 /* Return so caller can do something with it if needed */ 3032 DPRINTF(sc, ATH_DEBUG_SW_TX, 3033 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", 3034 __func__, subtype, tid, seqno); 3035 return seqno; 3036 } 3037 3038 /* 3039 * Attempt to direct dispatch an aggregate frame to hardware. 3040 * If the frame is out of BAW, queue. 3041 * Otherwise, schedule it as a single frame. 3042 */ 3043 static void 3044 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 3045 struct ath_txq *txq, struct ath_buf *bf) 3046 { 3047 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 3048 struct ieee80211_tx_ampdu *tap; 3049 3050 ATH_TX_LOCK_ASSERT(sc); 3051 3052 tap = ath_tx_get_tx_tid(an, tid->tid); 3053 3054 /* paused? queue */ 3055 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 3056 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3057 /* XXX don't sched - we're paused! */ 3058 return; 3059 } 3060 3061 /* outside baw? queue */ 3062 if (bf->bf_state.bfs_dobaw && 3063 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 3064 SEQNO(bf->bf_state.bfs_seqno)))) { 3065 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3066 ath_tx_tid_sched(sc, tid); 3067 return; 3068 } 3069 3070 /* 3071 * This is a temporary check and should be removed once 3072 * all the relevant code paths have been fixed. 3073 * 3074 * During aggregate retries, it's possible that the head 3075 * frame will fail (which has the bfs_aggr and bfs_nframes 3076 * fields set for said aggregate) and will be retried as 3077 * a single frame. In this instance, the values should 3078 * be reset or the completion code will get upset with you. 
3079 */ 3080 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 3081 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3082 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 3083 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 3084 bf->bf_state.bfs_aggr = 0; 3085 bf->bf_state.bfs_nframes = 1; 3086 } 3087 3088 /* Update CLRDMASK just before this frame is queued */ 3089 ath_tx_update_clrdmask(sc, tid, bf); 3090 3091 /* Direct dispatch to hardware */ 3092 ath_tx_do_ratelookup(sc, bf); 3093 ath_tx_calc_duration(sc, bf); 3094 ath_tx_calc_protection(sc, bf); 3095 ath_tx_set_rtscts(sc, bf); 3096 ath_tx_rate_fill_rcflags(sc, bf); 3097 ath_tx_setds(sc, bf); 3098 3099 /* Statistics */ 3100 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3101 3102 /* Track per-TID hardware queue depth correctly */ 3103 tid->hwq_depth++; 3104 3105 /* Add to BAW */ 3106 if (bf->bf_state.bfs_dobaw) { 3107 ath_tx_addto_baw(sc, an, tid, bf); 3108 bf->bf_state.bfs_addedbaw = 1; 3109 } 3110 3111 /* Set completion handler, multi-frame aggregate or not */ 3112 bf->bf_comp = ath_tx_aggr_comp; 3113 3114 /* 3115 * Update the current leak count if 3116 * we're leaking frames; and set the 3117 * MORE flag as appropriate. 3118 */ 3119 ath_tx_leak_count_update(sc, tid, bf); 3120 3121 /* Hand off to hardware */ 3122 ath_tx_handoff(sc, txq, bf); 3123 } 3124 3125 /* 3126 * Attempt to send the packet. 3127 * If the queue isn't busy, direct-dispatch. 3128 * If the queue is busy enough, queue the given packet on the 3129 * relevant software queue. 3130 */ 3131 void 3132 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3133 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3134 { 3135 struct ath_node *an = ATH_NODE(ni); 3136 struct ieee80211_frame *wh; 3137 struct ath_tid *atid; 3138 int pri, tid; 3139 struct mbuf *m0 = bf->bf_m; 3140 3141 ATH_TX_LOCK_ASSERT(sc); 3142 3143 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3144 wh = mtod(m0, struct ieee80211_frame *); 3145 pri = ath_tx_getac(sc, m0); 3146 tid = ath_tx_gettid(sc, m0); 3147 atid = &an->an_tid[tid]; 3148 3149 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3150 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3151 3152 /* Set local packet state, used to queue packets to hardware */ 3153 /* XXX potentially duplicate info, re-check */ 3154 bf->bf_state.bfs_tid = tid; 3155 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3156 bf->bf_state.bfs_pri = pri; 3157 3158 /* 3159 * If the hardware queue isn't busy, direct-dispatch it. 3160 * If the hardware queue is busy, software queue it. 3161 * If the TID is paused or the traffic is outside the BAW, software 3162 * queue it. 3163 * 3164 * If the node is in power-save and we're leaking a frame, 3165 * leak a single frame. 3166 */ 3167 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3168 /* TID is paused, queue */ 3169 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3170 /* 3171 * If the caller requested that it be sent at a high 3172 * priority, queue it at the head of the list. 3173 */ 3174 if (queue_to_head) 3175 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3176 else 3177 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3178 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3179 /* AMPDU pending; queue */ 3180 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3181 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3182 /* XXX sched? */ 3183 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3184 /* 3185 * AMPDU running, queue single-frame if the hardware queue 3186 * isn't busy.
3187 * 3188 * If the hardware queue is busy, sending an aggregate frame 3189 * then just hold off so we can queue more aggregate frames. 3190 * 3191 * Otherwise we may end up with single frames leaking through 3192 * because we are dispatching them too quickly. 3193 * 3194 * TODO: maybe we should treat this as two policies - minimise 3195 * latency, or maximise throughput. Then for BE/BK we can 3196 * maximise throughput, and VO/VI (if AMPDU is enabled!) 3197 * minimise latency. 3198 */ 3199 3200 /* 3201 * Always queue the frame to the tail of the list. 3202 */ 3203 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3204 3205 /* 3206 * If the hardware queue isn't busy, direct dispatch 3207 * the head frame in the list. 3208 * 3209 * Note: if we're say, configured to do ADDBA but not A-MPDU 3210 * then maybe we want to still queue two non-aggregate frames 3211 * to the hardware. Again with the per-TID policy 3212 * configuration..) 3213 * 3214 * Otherwise, schedule the TID. 3215 */ 3216 /* XXX TXQ locking */ 3217 if (txq->axq_depth + txq->fifo.axq_depth == 0) { 3218 3219 bf = ATH_TID_FIRST(atid); 3220 ATH_TID_REMOVE(atid, bf, bf_list); 3221 3222 /* 3223 * Ensure it's definitely treated as a non-AMPDU 3224 * frame - this information may have been left 3225 * over from a previous attempt. 3226 */ 3227 bf->bf_state.bfs_aggr = 0; 3228 bf->bf_state.bfs_nframes = 1; 3229 3230 /* Queue to the hardware */ 3231 ath_tx_xmit_aggr(sc, an, txq, bf); 3232 DPRINTF(sc, ATH_DEBUG_SW_TX, 3233 "%s: xmit_aggr\n", 3234 __func__); 3235 } else { 3236 DPRINTF(sc, ATH_DEBUG_SW_TX, 3237 "%s: ampdu; swq'ing\n", 3238 __func__); 3239 3240 ath_tx_tid_sched(sc, atid); 3241 } 3242 /* 3243 * If we're not doing A-MPDU, be prepared to direct dispatch 3244 * up to both limits if possible. This particular corner 3245 * case may end up with packet starvation between aggregate 3246 * traffic and non-aggregate traffic: we want to ensure 3247 * that non-aggregate stations get a few frames queued to the 3248 * hardware before the aggregate station(s) get their chance. 3249 * 3250 * So if you only ever see a couple of frames direct dispatched 3251 * to the hardware from a non-AMPDU client, check both here 3252 * and in the software queue dispatcher to ensure that those 3253 * non-AMPDU stations get a fair chance to transmit. 3254 */ 3255 /* XXX TXQ locking */ 3256 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && 3257 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { 3258 /* AMPDU not running, attempt direct dispatch */ 3259 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 3260 /* See if clrdmask needs to be set */ 3261 ath_tx_update_clrdmask(sc, atid, bf); 3262 3263 /* 3264 * Update the current leak count if 3265 * we're leaking frames; and set the 3266 * MORE flag as appropriate. 3267 */ 3268 ath_tx_leak_count_update(sc, atid, bf); 3269 3270 /* 3271 * Dispatch the frame. 3272 */ 3273 ath_tx_xmit_normal(sc, txq, bf); 3274 } else { 3275 /* Busy; queue */ 3276 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 3277 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3278 ath_tx_tid_sched(sc, atid); 3279 } 3280 } 3281 3282 /* 3283 * Only set the clrdmask bit if none of the nodes are currently 3284 * filtered. 3285 * 3286 * XXX TODO: go through all the callers and check to see 3287 * which are being called in the context of looping over all 3288 * TIDs (eg, if all tids are being paused, resumed, etc.) 3289 * That'll avoid O(n^2) complexity here. 
3290 */ 3291 static void 3292 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3293 { 3294 int i; 3295 3296 ATH_TX_LOCK_ASSERT(sc); 3297 3298 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3299 if (an->an_tid[i].isfiltered == 1) 3300 return; 3301 } 3302 an->clrdmask = 1; 3303 } 3304 3305 /* 3306 * Configure the per-TID node state. 3307 * 3308 * This likely belongs in if_ath_node.c but I can't think of anywhere 3309 * else to put it just yet. 3310 * 3311 * This sets up the SLISTs and the mutex as appropriate. 3312 */ 3313 void 3314 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3315 { 3316 int i, j; 3317 struct ath_tid *atid; 3318 3319 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3320 atid = &an->an_tid[i]; 3321 3322 /* XXX now with this bzero(), is the field 0'ing needed? */ 3323 bzero(atid, sizeof(*atid)); 3324 3325 TAILQ_INIT(&atid->tid_q); 3326 TAILQ_INIT(&atid->filtq.tid_q); 3327 atid->tid = i; 3328 atid->an = an; 3329 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3330 atid->tx_buf[j] = NULL; 3331 atid->baw_head = atid->baw_tail = 0; 3332 atid->paused = 0; 3333 atid->sched = 0; 3334 atid->hwq_depth = 0; 3335 atid->cleanup_inprogress = 0; 3336 if (i == IEEE80211_NONQOS_TID) 3337 atid->ac = ATH_NONQOS_TID_AC; 3338 else 3339 atid->ac = TID_TO_WME_AC(i); 3340 } 3341 an->clrdmask = 1; /* Always start by setting this bit */ 3342 } 3343 3344 /* 3345 * Pause the current TID. This stops packets from being transmitted 3346 * on it. 3347 * 3348 * Since this is also called from upper layers as well as the driver, 3349 * it will get the TID lock. 3350 */ 3351 static void 3352 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3353 { 3354 3355 ATH_TX_LOCK_ASSERT(sc); 3356 tid->paused++; 3357 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", 3358 __func__, 3359 tid->an->an_node.ni_macaddr, ":", 3360 tid->tid, 3361 tid->paused); 3362 } 3363 3364 /* 3365 * Unpause the current TID, and schedule it if needed. 3366 */ 3367 static void 3368 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3369 { 3370 ATH_TX_LOCK_ASSERT(sc); 3371 3372 /* 3373 * There are some odd places where ath_tx_tid_resume() is called 3374 * when it shouldn't be; this works around that particular issue 3375 * until it's actually resolved. 3376 */ 3377 if (tid->paused == 0) { 3378 device_printf(sc->sc_dev, 3379 "%s: [%6D]: tid=%d, paused=0?\n", 3380 __func__, 3381 tid->an->an_node.ni_macaddr, ":", 3382 tid->tid); 3383 } else { 3384 tid->paused--; 3385 } 3386 3387 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3388 "%s: [%6D]: tid=%d, unpaused = %d\n", 3389 __func__, 3390 tid->an->an_node.ni_macaddr, ":", 3391 tid->tid, 3392 tid->paused); 3393 3394 if (tid->paused) 3395 return; 3396 3397 /* 3398 * Override the clrdmask configuration for the next frame 3399 * from this TID, just to get the ball rolling. 3400 */ 3401 ath_tx_set_clrdmask(sc, tid->an); 3402 3403 if (tid->axq_depth == 0) 3404 return; 3405 3406 /* XXX isfiltered shouldn't ever be 1 at this point */ 3407 if (tid->isfiltered == 1) { 3408 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3409 __func__); 3410 return; 3411 } 3412 3413 ath_tx_tid_sched(sc, tid); 3414 3415 /* 3416 * Queue the software TX scheduler. 3417 */ 3418 ath_tx_swq_kick(sc); 3419 } 3420 3421 /* 3422 * Add the given ath_buf to the TID filtered frame list. 3423 * This requires that the TID be filtered.
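 *
 * ("Filtered" refers to the hardware destination-mask behaviour:
 * once a frame to a station fails, the MAC completes subsequent
 * frames to that station with a filtered status (HAL_TXERR_FILT)
 * instead of transmitting them, until a frame with CLRDMASK set
 * is queued. Those completions end up here so the frames can be
 * retried in order later.)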
3424 */ 3425 static void 3426 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3427 struct ath_buf *bf) 3428 { 3429 3430 ATH_TX_LOCK_ASSERT(sc); 3431 3432 if (!tid->isfiltered) 3433 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3434 __func__); 3435 3436 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3437 3438 /* Set the retry bit and bump the retry counter */ 3439 ath_tx_set_retry(sc, bf); 3440 sc->sc_stats.ast_tx_swfiltered++; 3441 3442 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3443 } 3444 3445 /* 3446 * Handle a completed filtered frame from the given TID. 3447 * This just enables/pauses the filtered frame state if required 3448 * and appends the filtered frame to the filtered queue. 3449 */ 3450 static void 3451 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3452 struct ath_buf *bf) 3453 { 3454 3455 ATH_TX_LOCK_ASSERT(sc); 3456 3457 if (! tid->isfiltered) { 3458 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3459 __func__, tid->tid); 3460 tid->isfiltered = 1; 3461 ath_tx_tid_pause(sc, tid); 3462 } 3463 3464 /* Add the frame to the filter queue */ 3465 ath_tx_tid_filt_addbuf(sc, tid, bf); 3466 } 3467 3468 /* 3469 * Complete the filtered frame TX completion. 3470 * 3471 * If there are no more frames in the hardware queue, unpause/unfilter 3472 * the TID if applicable. Otherwise we will wait for a node PS transition 3473 * to unfilter. 3474 */ 3475 static void 3476 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3477 { 3478 struct ath_buf *bf; 3479 int do_resume = 0; 3480 3481 ATH_TX_LOCK_ASSERT(sc); 3482 3483 if (tid->hwq_depth != 0) 3484 return; 3485 3486 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3487 __func__, tid->tid); 3488 if (tid->isfiltered == 1) { 3489 tid->isfiltered = 0; 3490 do_resume = 1; 3491 } 3492 3493 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3494 ath_tx_set_clrdmask(sc, tid->an); 3495 3496 /* XXX this is really quite inefficient */ 3497 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3498 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3499 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3500 } 3501 3502 /* And only resume if we had paused before */ 3503 if (do_resume) 3504 ath_tx_tid_resume(sc, tid); 3505 } 3506 3507 /* 3508 * Called when a single (aggregate or otherwise) frame is completed. 3509 * 3510 * Returns 0 if the buffer could be added to the filtered list 3511 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3512 * filtered list (failed clone; expired retry) and the caller should 3513 * free it and handle it like a failure (eg by sending a BAR.) 3514 * 3515 * Since the buffer may be cloned, bf must not be touched after this 3516 * if the return value is 0. 3517 */ 3518 static int 3519 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3520 struct ath_buf *bf) 3521 { 3522 struct ath_buf *nbf; 3523 int retval; 3524 3525 ATH_TX_LOCK_ASSERT(sc); 3526 3527 /* 3528 * Don't allow a filtered frame to live forever. 3529 */ 3530 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3531 sc->sc_stats.ast_tx_swretrymax++; 3532 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3533 "%s: bf=%p, seqno=%d, exceeded retries\n", 3534 __func__, 3535 bf, 3536 SEQNO(bf->bf_state.bfs_seqno)); 3537 retval = 1; /* error */ 3538 goto finish; 3539 } 3540 3541 /* 3542 * A busy buffer can't be added to the retry list. 3543 * It needs to be cloned.
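 *
 * (A buffer is marked ATH_BUF_BUSY when the hardware may still be
 * referencing its descriptor - typically because it is the tail
 * of a TXQ descriptor list - so its contents can't yet be
 * rewritten for a retry. ath_tx_retry_clone() copies the frame
 * state into a fresh ath_buf; the busy one is reclaimed once the
 * hardware has moved on.)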
3544 */ 3545 if (bf->bf_flags & ATH_BUF_BUSY) { 3546 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3547 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3548 "%s: busy buffer clone: %p -> %p\n", 3549 __func__, bf, nbf); 3550 } else { 3551 nbf = bf; 3552 } 3553 3554 if (nbf == NULL) { 3555 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3556 "%s: busy buffer couldn't be cloned (%p)!\n", 3557 __func__, bf); 3558 retval = 1; /* error */ 3559 } else { 3560 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3561 retval = 0; /* ok */ 3562 } 3563 finish: 3564 ath_tx_tid_filt_comp_complete(sc, tid); 3565 3566 return (retval); 3567 } 3568 3569 static void 3570 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3571 struct ath_buf *bf_first, ath_bufhead *bf_q) 3572 { 3573 struct ath_buf *bf, *bf_next, *nbf; 3574 3575 ATH_TX_LOCK_ASSERT(sc); 3576 3577 bf = bf_first; 3578 while (bf) { 3579 bf_next = bf->bf_next; 3580 bf->bf_next = NULL; /* Remove it from the aggr list */ 3581 3582 /* 3583 * Don't allow a filtered frame to live forever. 3584 */ 3585 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3586 sc->sc_stats.ast_tx_swretrymax++; 3587 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3588 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3589 __func__, 3590 tid->tid, 3591 bf, 3592 SEQNO(bf->bf_state.bfs_seqno)); 3593 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3594 goto next; 3595 } 3596 3597 if (bf->bf_flags & ATH_BUF_BUSY) { 3598 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3599 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3600 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3601 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3602 } else { 3603 nbf = bf; 3604 } 3605 3606 /* 3607 * If the buffer couldn't be cloned, add it to bf_q; 3608 * the caller will free the buffer(s) as required. 3609 */ 3610 if (nbf == NULL) { 3611 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3612 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3613 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3614 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3615 } else { 3616 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3617 } 3618 next: 3619 bf = bf_next; 3620 } 3621 3622 ath_tx_tid_filt_comp_complete(sc, tid); 3623 } 3624 3625 /* 3626 * Suspend the queue because we need to TX a BAR. 3627 */ 3628 static void 3629 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3630 { 3631 3632 ATH_TX_LOCK_ASSERT(sc); 3633 3634 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3635 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3636 __func__, 3637 tid->tid, 3638 tid->bar_wait, 3639 tid->bar_tx); 3640 3641 /* We shouldn't be called when bar_tx is 1 */ 3642 if (tid->bar_tx) { 3643 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3644 "%s: bar_tx is 1?!\n", __func__); 3645 } 3646 3647 /* If we've already been called, just be patient. */ 3648 if (tid->bar_wait) 3649 return; 3650 3651 /* Wait! */ 3652 tid->bar_wait = 1; 3653 3654 /* Only one pause, no matter how many frames fail */ 3655 ath_tx_tid_pause(sc, tid); 3656 } 3657 3658 /* 3659 * We've finished with BAR handling - either we succeeded or 3660 * failed. Either way, unsuspend TX. 
3661 */ 3662 static void 3663 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3664 { 3665 3666 ATH_TX_LOCK_ASSERT(sc); 3667 3668 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3669 "%s: %6D: TID=%d, called\n", 3670 __func__, 3671 tid->an->an_node.ni_macaddr, 3672 ":", 3673 tid->tid); 3674 3675 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3676 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3677 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3678 __func__, tid->an->an_node.ni_macaddr, ":", 3679 tid->tid, tid->bar_tx, tid->bar_wait); 3680 } 3681 3682 tid->bar_tx = tid->bar_wait = 0; 3683 ath_tx_tid_resume(sc, tid); 3684 } 3685 3686 /* 3687 * Return whether we're ready to TX a BAR frame. 3688 * 3689 * Requires the TID lock be held. 3690 */ 3691 static int 3692 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3693 { 3694 3695 ATH_TX_LOCK_ASSERT(sc); 3696 3697 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3698 return (0); 3699 3700 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3701 "%s: %6D: TID=%d, bar ready\n", 3702 __func__, 3703 tid->an->an_node.ni_macaddr, 3704 ":", 3705 tid->tid); 3706 3707 return (1); 3708 } 3709 3710 /* 3711 * Check whether the current TID is ready to have a BAR 3712 * TXed and if so, do the TX. 3713 * 3714 * Since the TID/TXQ lock can't be held during a call to 3715 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3716 * sending the BAR and locking it again. 3717 * 3718 * Eventually, the code to send the BAR should be broken out 3719 * from this routine so the lock doesn't have to be reacquired 3720 * just to be immediately dropped by the caller. 3721 */ 3722 static void 3723 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3724 { 3725 struct ieee80211_tx_ampdu *tap; 3726 3727 ATH_TX_LOCK_ASSERT(sc); 3728 3729 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3730 "%s: %6D: TID=%d, called\n", 3731 __func__, 3732 tid->an->an_node.ni_macaddr, 3733 ":", 3734 tid->tid); 3735 3736 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3737 3738 /* 3739 * This is an error condition! 3740 */ 3741 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3742 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3743 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3744 __func__, tid->an->an_node.ni_macaddr, ":", 3745 tid->tid, tid->bar_tx, tid->bar_wait); 3746 return; 3747 } 3748 3749 /* Don't do anything if we still have pending frames */ 3750 if (tid->hwq_depth > 0) { 3751 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3752 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3753 __func__, 3754 tid->an->an_node.ni_macaddr, 3755 ":", 3756 tid->tid, 3757 tid->hwq_depth); 3758 return; 3759 } 3760 3761 /* We're now about to TX */ 3762 tid->bar_tx = 1; 3763 3764 /* 3765 * Override the clrdmask configuration for the next frame, 3766 * just to get the ball rolling. 3767 */ 3768 ath_tx_set_clrdmask(sc, tid->an); 3769 3770 /* 3771 * Calculate new BAW left edge, now that all frames have either 3772 * succeeded or failed. 3773 * 3774 * XXX verify this is _actually_ the valid value to begin at! 3775 */ 3776 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3777 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3778 __func__, 3779 tid->an->an_node.ni_macaddr, 3780 ":", 3781 tid->tid, 3782 tap->txa_start); 3783 3784 /* Try sending the BAR frame */ 3785 /* We can't hold the lock here! */ 3786 3787 ATH_TX_UNLOCK(sc); 3788 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3789 /* Success? Now we wait for notification that it's done */ 3790 ATH_TX_LOCK(sc); 3791 return; 3792 } 3793 3794 /* Failure? 
For now, warn loudly and continue */
3795 	ATH_TX_LOCK(sc);
3796 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3797 	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3798 	    __func__, tid->an->an_node.ni_macaddr, ":",
3799 	    tid->tid);
3800 	ath_tx_tid_bar_unsuspend(sc, tid);
3801 }
3802
3803 static void
3804 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3805     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3806 {
3807
3808 	ATH_TX_LOCK_ASSERT(sc);
3809
3810 	/*
3811 	 * If the current TID is running AMPDU, update
3812 	 * the BAW.
3813 	 */
3814 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3815 	    bf->bf_state.bfs_dobaw) {
3816 		/*
3817 		 * Only remove the frame from the BAW if it's
3818 		 * been transmitted at least once; this means
3819 		 * the frame was in the BAW to begin with.
3820 		 */
3821 		if (bf->bf_state.bfs_retries > 0) {
3822 			ath_tx_update_baw(sc, an, tid, bf);
3823 			bf->bf_state.bfs_dobaw = 0;
3824 		}
3825 #if 0
3826 		/*
3827 		 * This has become a non-fatal error now
3828 		 */
3829 		if (! bf->bf_state.bfs_addedbaw)
3830 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3831 			    "%s: wasn't added: seqno %d\n",
3832 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3833 #endif
3834 	}
3835
3836 	/* Strip it out of an aggregate list if it was in one */
3837 	bf->bf_next = NULL;
3838
3839 	/* Insert on the free queue to be freed by the caller */
3840 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3841 }
3842
3843 static void
3844 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3845     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3846 {
3847 	struct ieee80211_node *ni = &an->an_node;
3848 	struct ath_txq *txq;
3849 	struct ieee80211_tx_ampdu *tap;
3850
3851 	txq = sc->sc_ac2q[tid->ac];
3852 	tap = ath_tx_get_tx_tid(an, tid->tid);
3853
3854 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3855 	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3856 	    "seqno=%d, retry=%d\n",
3857 	    __func__,
3858 	    pfx,
3859 	    ni->ni_macaddr,
3860 	    ":",
3861 	    bf,
3862 	    bf->bf_state.bfs_addedbaw,
3863 	    bf->bf_state.bfs_dobaw,
3864 	    SEQNO(bf->bf_state.bfs_seqno),
3865 	    bf->bf_state.bfs_retries);
3866 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3867 	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3868 	    __func__,
3869 	    pfx,
3870 	    ni->ni_macaddr,
3871 	    ":",
3872 	    bf,
3873 	    txq->axq_qnum,
3874 	    txq->axq_depth,
3875 	    txq->axq_aggr_depth);
3876 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3877 	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3878 	    "isfiltered=%d\n",
3879 	    __func__,
3880 	    pfx,
3881 	    ni->ni_macaddr,
3882 	    ":",
3883 	    bf,
3884 	    tid->axq_depth,
3885 	    tid->hwq_depth,
3886 	    tid->bar_wait,
3887 	    tid->isfiltered);
3888 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3889 	    "%s: %s: %6D: tid %d: "
3890 	    "sched=%d, paused=%d, "
3891 	    "incomp=%d, baw_head=%d, "
3892 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3893 	    __func__,
3894 	    pfx,
3895 	    ni->ni_macaddr,
3896 	    ":",
3897 	    tid->tid,
3898 	    tid->sched, tid->paused,
3899 	    tid->incomp, tid->baw_head,
3900 	    tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3901 	    ni->ni_txseqs[tid->tid]);
3902
3903 	/* XXX Dump the frame, see what it is? */
3904 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3905 		ieee80211_dump_pkt(ni->ni_ic,
3906 		    mtod(bf->bf_m, const uint8_t *),
3907 		    bf->bf_m->m_len, 0, -1);
3908 }
3909
3910 /*
3911  * Free any packets currently pending in the software TX queue.
3912  *
3913  * This will be called when a node is being deleted.
3914  *
3915  * It can also be called on an active node during an interface
3916  * reset or state transition.
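 * Completed buffers end up on the caller-supplied bf_cq list; the
 * caller must run their completion handlers outside the TX lock.
 * A minimal sketch of the pattern (as used by ath_tx_node_flush()
 * below):
 *
 *	TAILQ_INIT(&bf_cq);
 *	ATH_TX_LOCK(sc);
 *	ath_tx_tid_drain(sc, an, atid, &bf_cq);
 *	ATH_TX_UNLOCK(sc);
 *	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
 *		TAILQ_REMOVE(&bf_cq, bf, bf_list);
 *		ath_tx_default_comp(sc, bf, 0);
 *	}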
3917 * 3918 * (From Linux/reference): 3919 * 3920 * TODO: For frame(s) that are in the retry state, we will reuse the 3921 * sequence number(s) without setting the retry bit. The 3922 * alternative is to give up on these and BAR the receiver's window 3923 * forward. 3924 */ 3925 static void 3926 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3927 struct ath_tid *tid, ath_bufhead *bf_cq) 3928 { 3929 struct ath_buf *bf; 3930 struct ieee80211_tx_ampdu *tap; 3931 struct ieee80211_node *ni = &an->an_node; 3932 int t; 3933 3934 tap = ath_tx_get_tx_tid(an, tid->tid); 3935 3936 ATH_TX_LOCK_ASSERT(sc); 3937 3938 /* Walk the queue, free frames */ 3939 t = 0; 3940 for (;;) { 3941 bf = ATH_TID_FIRST(tid); 3942 if (bf == NULL) { 3943 break; 3944 } 3945 3946 if (t == 0) { 3947 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3948 // t = 1; 3949 } 3950 3951 ATH_TID_REMOVE(tid, bf, bf_list); 3952 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3953 } 3954 3955 /* And now, drain the filtered frame queue */ 3956 t = 0; 3957 for (;;) { 3958 bf = ATH_TID_FILT_FIRST(tid); 3959 if (bf == NULL) 3960 break; 3961 3962 if (t == 0) { 3963 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3964 // t = 1; 3965 } 3966 3967 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3968 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3969 } 3970 3971 /* 3972 * Override the clrdmask configuration for the next frame 3973 * in case there is some future transmission, just to get 3974 * the ball rolling. 3975 * 3976 * This won't hurt things if the TID is about to be freed. 3977 */ 3978 ath_tx_set_clrdmask(sc, tid->an); 3979 3980 /* 3981 * Now that it's completed, grab the TID lock and update 3982 * the sequence number and BAW window. 3983 * Because sequence numbers have been assigned to frames 3984 * that haven't been sent yet, it's entirely possible 3985 * we'll be called with some pending frames that have not 3986 * been transmitted. 3987 * 3988 * The cleaner solution is to do the sequence number allocation 3989 * when the packet is first transmitted - and thus the "retries" 3990 * check above would be enough to update the BAW/seqno. 3991 */ 3992 3993 /* But don't do it for non-QoS TIDs */ 3994 if (tap) { 3995 #if 1 3996 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3997 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", 3998 __func__, 3999 ni->ni_macaddr, 4000 ":", 4001 an, 4002 tid->tid, 4003 tap->txa_start); 4004 #endif 4005 ni->ni_txseqs[tid->tid] = tap->txa_start; 4006 tid->baw_tail = tid->baw_head; 4007 } 4008 } 4009 4010 /* 4011 * Reset the TID state. This must be only called once the node has 4012 * had its frames flushed from this TID, to ensure that no other 4013 * pause / unpause logic can kick in. 4014 */ 4015 static void 4016 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) 4017 { 4018 4019 #if 0 4020 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; 4021 tid->paused = tid->sched = tid->addba_tx_pending = 0; 4022 tid->incomp = tid->cleanup_inprogress = 0; 4023 #endif 4024 4025 /* 4026 * If we have a bar_wait set, we need to unpause the TID 4027 * here. Otherwise once cleanup has finished, the TID won't 4028 * have the right paused counter. 4029 * 4030 * XXX I'm not going through resume here - I don't want the 4031 * node to be rescheuled just yet. This however should be 4032 * methodized! 4033 */ 4034 if (tid->bar_wait) { 4035 if (tid->paused > 0) { 4036 tid->paused --; 4037 } 4038 } 4039 4040 /* 4041 * XXX same with a currently filtered TID. 
4042 * 4043 * Since this is being called during a flush, we assume that 4044 * the filtered frame list is actually empty. 4045 * 4046 * XXX TODO: add in a check to ensure that the filtered queue 4047 * depth is actually 0! 4048 */ 4049 if (tid->isfiltered) { 4050 if (tid->paused > 0) { 4051 tid->paused --; 4052 } 4053 } 4054 4055 /* 4056 * Clear BAR, filtered frames, scheduled and ADDBA pending. 4057 * The TID may be going through cleanup from the last association 4058 * where things in the BAW are still in the hardware queue. 4059 */ 4060 tid->bar_wait = 0; 4061 tid->bar_tx = 0; 4062 tid->isfiltered = 0; 4063 tid->sched = 0; 4064 tid->addba_tx_pending = 0; 4065 4066 /* 4067 * XXX TODO: it may just be enough to walk the HWQs and mark 4068 * frames for that node as non-aggregate; or mark the ath_node 4069 * with something that indicates that aggregation is no longer 4070 * occurring. Then we can just toss the BAW complaints and 4071 * do a complete hard reset of state here - no pause, no 4072 * complete counter, etc. 4073 */ 4074 4075 } 4076 4077 /* 4078 * Flush all software queued packets for the given node. 4079 * 4080 * This occurs when a completion handler frees the last buffer 4081 * for a node, and the node is thus freed. This causes the node 4082 * to be cleaned up, which ends up calling ath_tx_node_flush. 4083 */ 4084 void 4085 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 4086 { 4087 int tid; 4088 ath_bufhead bf_cq; 4089 struct ath_buf *bf; 4090 4091 TAILQ_INIT(&bf_cq); 4092 4093 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 4094 &an->an_node); 4095 4096 ATH_TX_LOCK(sc); 4097 DPRINTF(sc, ATH_DEBUG_NODE, 4098 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 4099 "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 4100 __func__, 4101 an->an_node.ni_macaddr, 4102 ":", 4103 an->an_is_powersave, 4104 an->an_stack_psq, 4105 an->an_tim_set, 4106 an->an_swq_depth, 4107 an->clrdmask, 4108 an->an_leak_count); 4109 4110 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 4111 struct ath_tid *atid = &an->an_tid[tid]; 4112 4113 /* Free packets */ 4114 ath_tx_tid_drain(sc, an, atid, &bf_cq); 4115 4116 /* Remove this tid from the list of active tids */ 4117 ath_tx_tid_unsched(sc, atid); 4118 4119 /* Reset the per-TID pause, BAR, etc state */ 4120 ath_tx_tid_reset(sc, atid); 4121 } 4122 4123 /* 4124 * Clear global leak count 4125 */ 4126 an->an_leak_count = 0; 4127 ATH_TX_UNLOCK(sc); 4128 4129 /* Handle completed frames */ 4130 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4131 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4132 ath_tx_default_comp(sc, bf, 0); 4133 } 4134 } 4135 4136 /* 4137 * Drain all the software TXQs currently with traffic queued. 4138 */ 4139 void 4140 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 4141 { 4142 struct ath_tid *tid; 4143 ath_bufhead bf_cq; 4144 struct ath_buf *bf; 4145 4146 TAILQ_INIT(&bf_cq); 4147 ATH_TX_LOCK(sc); 4148 4149 /* 4150 * Iterate over all active tids for the given txq, 4151 * flushing and unsched'ing them 4152 */ 4153 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 4154 tid = TAILQ_FIRST(&txq->axq_tidq); 4155 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4156 ath_tx_tid_unsched(sc, tid); 4157 } 4158 4159 ATH_TX_UNLOCK(sc); 4160 4161 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4162 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4163 ath_tx_default_comp(sc, bf, 0); 4164 } 4165 } 4166 4167 /* 4168 * Handle completion of non-aggregate session frames. 
4169 * 4170 * This (currently) doesn't implement software retransmission of 4171 * non-aggregate frames! 4172 * 4173 * Software retransmission of non-aggregate frames needs to obey 4174 * the strict sequence number ordering, and drop any frames that 4175 * will fail this. 4176 * 4177 * For now, filtered frames and frame transmission will cause 4178 * all kinds of issues. So we don't support them. 4179 * 4180 * So anyone queuing frames via ath_tx_normal_xmit() or 4181 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4182 */ 4183 void 4184 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4185 { 4186 struct ieee80211_node *ni = bf->bf_node; 4187 struct ath_node *an = ATH_NODE(ni); 4188 int tid = bf->bf_state.bfs_tid; 4189 struct ath_tid *atid = &an->an_tid[tid]; 4190 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4191 4192 /* The TID state is protected behind the TXQ lock */ 4193 ATH_TX_LOCK(sc); 4194 4195 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4196 __func__, bf, fail, atid->hwq_depth - 1); 4197 4198 atid->hwq_depth--; 4199 4200 #if 0 4201 /* 4202 * If the frame was filtered, stick it on the filter frame 4203 * queue and complain about it. It shouldn't happen! 4204 */ 4205 if ((ts->ts_status & HAL_TXERR_FILT) || 4206 (ts->ts_status != 0 && atid->isfiltered)) { 4207 DPRINTF(sc, ATH_DEBUG_SW_TX, 4208 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4209 __func__, 4210 atid->isfiltered, 4211 ts->ts_status); 4212 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4213 } 4214 #endif 4215 if (atid->isfiltered) 4216 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4217 if (atid->hwq_depth < 0) 4218 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4219 __func__, atid->hwq_depth); 4220 4221 /* If the TID is being cleaned up, track things */ 4222 /* XXX refactor! */ 4223 if (atid->cleanup_inprogress) { 4224 atid->incomp--; 4225 if (atid->incomp == 0) { 4226 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4227 "%s: TID %d: cleaned up! resume!\n", 4228 __func__, tid); 4229 atid->cleanup_inprogress = 0; 4230 ath_tx_tid_resume(sc, atid); 4231 } 4232 } 4233 4234 /* 4235 * If the queue is filtered, potentially mark it as complete 4236 * and reschedule it as needed. 4237 * 4238 * This is required as there may be a subsequent TX descriptor 4239 * for this end-node that has CLRDMASK set, so it's quite possible 4240 * that a filtered frame will be followed by a non-filtered 4241 * (complete or otherwise) frame. 4242 * 4243 * XXX should we do this before we complete the frame? 4244 */ 4245 if (atid->isfiltered) 4246 ath_tx_tid_filt_comp_complete(sc, atid); 4247 ATH_TX_UNLOCK(sc); 4248 4249 /* 4250 * punt to rate control if we're not being cleaned up 4251 * during a hw queue drain and the frame wanted an ACK. 4252 */ 4253 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4254 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4255 ts, bf->bf_state.bfs_pktlen, 4256 1, (ts->ts_status == 0) ? 0 : 1); 4257 4258 ath_tx_default_comp(sc, bf, fail); 4259 } 4260 4261 /* 4262 * Handle cleanup of aggregate session packets that aren't 4263 * an A-MPDU. 4264 * 4265 * There's no need to update the BAW here - the session is being 4266 * torn down. 
4267 */ 4268 static void 4269 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4270 { 4271 struct ieee80211_node *ni = bf->bf_node; 4272 struct ath_node *an = ATH_NODE(ni); 4273 int tid = bf->bf_state.bfs_tid; 4274 struct ath_tid *atid = &an->an_tid[tid]; 4275 4276 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4277 __func__, tid, atid->incomp); 4278 4279 ATH_TX_LOCK(sc); 4280 atid->incomp--; 4281 4282 /* XXX refactor! */ 4283 if (bf->bf_state.bfs_dobaw) { 4284 ath_tx_update_baw(sc, an, atid, bf); 4285 if (!bf->bf_state.bfs_addedbaw) 4286 DPRINTF(sc, ATH_DEBUG_SW_TX, 4287 "%s: wasn't added: seqno %d\n", 4288 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4289 } 4290 4291 if (atid->incomp == 0) { 4292 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4293 "%s: TID %d: cleaned up! resume!\n", 4294 __func__, tid); 4295 atid->cleanup_inprogress = 0; 4296 ath_tx_tid_resume(sc, atid); 4297 } 4298 ATH_TX_UNLOCK(sc); 4299 4300 ath_tx_default_comp(sc, bf, 0); 4301 } 4302 4303 4304 /* 4305 * This as it currently stands is a bit dumb. Ideally we'd just 4306 * fail the frame the normal way and have it permanently fail 4307 * via the normal aggregate completion path. 4308 */ 4309 static void 4310 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4311 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4312 { 4313 struct ath_tid *atid = &an->an_tid[tid]; 4314 struct ath_buf *bf, *bf_next; 4315 4316 ATH_TX_LOCK_ASSERT(sc); 4317 4318 /* 4319 * Remove this frame from the queue. 4320 */ 4321 ATH_TID_REMOVE(atid, bf_head, bf_list); 4322 4323 /* 4324 * Loop over all the frames in the aggregate. 4325 */ 4326 bf = bf_head; 4327 while (bf != NULL) { 4328 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4329 4330 /* 4331 * If it's been added to the BAW we need to kick 4332 * it out of the BAW before we continue. 4333 * 4334 * XXX if it's an aggregate, assert that it's in the 4335 * BAW - we shouldn't have it be in an aggregate 4336 * otherwise! 4337 */ 4338 if (bf->bf_state.bfs_addedbaw) { 4339 ath_tx_update_baw(sc, an, atid, bf); 4340 bf->bf_state.bfs_dobaw = 0; 4341 } 4342 4343 /* 4344 * Give it the default completion handler. 4345 */ 4346 bf->bf_comp = ath_tx_normal_comp; 4347 bf->bf_next = NULL; 4348 4349 /* 4350 * Add it to the list to free. 4351 */ 4352 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4353 4354 /* 4355 * Now advance to the next frame in the aggregate. 4356 */ 4357 bf = bf_next; 4358 } 4359 } 4360 4361 /* 4362 * Performs transmit side cleanup when TID changes from aggregated to 4363 * unaggregated and during reassociation. 4364 * 4365 * For now, this just tosses everything from the TID software queue 4366 * whether or not it has been retried and marks the TID as 4367 * pending completion if there's anything for this TID queued to 4368 * the hardware. 4369 * 4370 * The caller is responsible for pausing the TID and unpausing the 4371 * TID if no cleanup was required. Otherwise the cleanup path will 4372 * unpause the TID once the last hardware queued frame is completed. 4373 */ 4374 static void 4375 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4376 ath_bufhead *bf_cq) 4377 { 4378 struct ath_tid *atid = &an->an_tid[tid]; 4379 struct ath_buf *bf, *bf_next; 4380 4381 ATH_TX_LOCK_ASSERT(sc); 4382 4383 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4384 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4385 atid->cleanup_inprogress); 4386 4387 /* 4388 * Move the filtered frames to the TX queue, before 4389 * we run off and discard/process things. 
4390 */ 4391 4392 /* XXX this is really quite inefficient */ 4393 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4394 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4395 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4396 } 4397 4398 /* 4399 * Update the frames in the software TX queue: 4400 * 4401 * + Discard retry frames in the queue 4402 * + Fix the completion function to be non-aggregate 4403 */ 4404 bf = ATH_TID_FIRST(atid); 4405 while (bf) { 4406 /* 4407 * Grab the next frame in the list, we may 4408 * be fiddling with the list. 4409 */ 4410 bf_next = TAILQ_NEXT(bf, bf_list); 4411 4412 /* 4413 * Free the frame and all subframes. 4414 */ 4415 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4416 4417 /* 4418 * Next frame! 4419 */ 4420 bf = bf_next; 4421 } 4422 4423 /* 4424 * If there's anything in the hardware queue we wait 4425 * for the TID HWQ to empty. 4426 */ 4427 if (atid->hwq_depth > 0) { 4428 /* 4429 * XXX how about we kill atid->incomp, and instead 4430 * replace it with a macro that checks that atid->hwq_depth 4431 * is 0? 4432 */ 4433 atid->incomp = atid->hwq_depth; 4434 atid->cleanup_inprogress = 1; 4435 } 4436 4437 if (atid->cleanup_inprogress) 4438 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4439 "%s: TID %d: cleanup needed: %d packets\n", 4440 __func__, tid, atid->incomp); 4441 4442 /* Owner now must free completed frames */ 4443 } 4444 4445 static struct ath_buf * 4446 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4447 struct ath_tid *tid, struct ath_buf *bf) 4448 { 4449 struct ath_buf *nbf; 4450 int error; 4451 4452 /* 4453 * Clone the buffer. This will handle the dma unmap and 4454 * copy the node reference to the new buffer. If this 4455 * works out, 'bf' will have no DMA mapping, no mbuf 4456 * pointer and no node reference. 4457 */ 4458 nbf = ath_buf_clone(sc, bf); 4459 4460 #if 0 4461 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4462 __func__); 4463 #endif 4464 4465 if (nbf == NULL) { 4466 /* Failed to clone */ 4467 DPRINTF(sc, ATH_DEBUG_XMIT, 4468 "%s: failed to clone a busy buffer\n", 4469 __func__); 4470 return NULL; 4471 } 4472 4473 /* Setup the dma for the new buffer */ 4474 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4475 if (error != 0) { 4476 DPRINTF(sc, ATH_DEBUG_XMIT, 4477 "%s: failed to setup dma for clone\n", 4478 __func__); 4479 /* 4480 * Put this at the head of the list, not tail; 4481 * that way it doesn't interfere with the 4482 * busy buffer logic (which uses the tail of 4483 * the list.) 4484 */ 4485 ATH_TXBUF_LOCK(sc); 4486 ath_returnbuf_head(sc, nbf); 4487 ATH_TXBUF_UNLOCK(sc); 4488 return NULL; 4489 } 4490 4491 /* Update BAW if required, before we free the original buf */ 4492 if (bf->bf_state.bfs_dobaw) 4493 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4494 4495 /* Free original buffer; return new buffer */ 4496 ath_freebuf(sc, bf); 4497 4498 return nbf; 4499 } 4500 4501 /* 4502 * Handle retrying an unaggregate frame in an aggregate 4503 * session. 4504 * 4505 * If too many retries occur, pause the TID, wait for 4506 * any further retransmits (as there's no reason why 4507 * non-aggregate frames in an aggregate session are 4508 * transmitted in-order; they just have to be in-BAW) 4509 * and then queue a BAR. 
4510 */ 4511 static void 4512 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4513 { 4514 struct ieee80211_node *ni = bf->bf_node; 4515 struct ath_node *an = ATH_NODE(ni); 4516 int tid = bf->bf_state.bfs_tid; 4517 struct ath_tid *atid = &an->an_tid[tid]; 4518 struct ieee80211_tx_ampdu *tap; 4519 4520 ATH_TX_LOCK(sc); 4521 4522 tap = ath_tx_get_tx_tid(an, tid); 4523 4524 /* 4525 * If the buffer is marked as busy, we can't directly 4526 * reuse it. Instead, try to clone the buffer. 4527 * If the clone is successful, recycle the old buffer. 4528 * If the clone is unsuccessful, set bfs_retries to max 4529 * to force the next bit of code to free the buffer 4530 * for us. 4531 */ 4532 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4533 (bf->bf_flags & ATH_BUF_BUSY)) { 4534 struct ath_buf *nbf; 4535 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4536 if (nbf) 4537 /* bf has been freed at this point */ 4538 bf = nbf; 4539 else 4540 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4541 } 4542 4543 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4544 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4545 "%s: exceeded retries; seqno %d\n", 4546 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4547 sc->sc_stats.ast_tx_swretrymax++; 4548 4549 /* Update BAW anyway */ 4550 if (bf->bf_state.bfs_dobaw) { 4551 ath_tx_update_baw(sc, an, atid, bf); 4552 if (! bf->bf_state.bfs_addedbaw) 4553 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4554 "%s: wasn't added: seqno %d\n", 4555 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4556 } 4557 bf->bf_state.bfs_dobaw = 0; 4558 4559 /* Suspend the TX queue and get ready to send the BAR */ 4560 ath_tx_tid_bar_suspend(sc, atid); 4561 4562 /* Send the BAR if there are no other frames waiting */ 4563 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4564 ath_tx_tid_bar_tx(sc, atid); 4565 4566 ATH_TX_UNLOCK(sc); 4567 4568 /* Free buffer, bf is free after this call */ 4569 ath_tx_default_comp(sc, bf, 0); 4570 return; 4571 } 4572 4573 /* 4574 * This increments the retry counter as well as 4575 * sets the retry flag in the ath_buf and packet 4576 * body. 4577 */ 4578 ath_tx_set_retry(sc, bf); 4579 sc->sc_stats.ast_tx_swretries++; 4580 4581 /* 4582 * Insert this at the head of the queue, so it's 4583 * retried before any current/subsequent frames. 4584 */ 4585 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4586 ath_tx_tid_sched(sc, atid); 4587 /* Send the BAR if there are no other frames waiting */ 4588 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4589 ath_tx_tid_bar_tx(sc, atid); 4590 4591 ATH_TX_UNLOCK(sc); 4592 } 4593 4594 /* 4595 * Common code for aggregate excessive retry/subframe retry. 4596 * If retrying, queues buffers to bf_q. If not, frees the 4597 * buffers. 4598 * 4599 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4600 */ 4601 static int 4602 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4603 ath_bufhead *bf_q) 4604 { 4605 struct ieee80211_node *ni = bf->bf_node; 4606 struct ath_node *an = ATH_NODE(ni); 4607 int tid = bf->bf_state.bfs_tid; 4608 struct ath_tid *atid = &an->an_tid[tid]; 4609 4610 ATH_TX_LOCK_ASSERT(sc); 4611 4612 /* XXX clr11naggr should be done for all subframes */ 4613 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4614 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4615 4616 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4617 4618 /* 4619 * If the buffer is marked as busy, we can't directly 4620 * reuse it. Instead, try to clone the buffer. 4621 * If the clone is successful, recycle the old buffer. 
4622 * If the clone is unsuccessful, set bfs_retries to max 4623 * to force the next bit of code to free the buffer 4624 * for us. 4625 */ 4626 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4627 (bf->bf_flags & ATH_BUF_BUSY)) { 4628 struct ath_buf *nbf; 4629 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4630 if (nbf) 4631 /* bf has been freed at this point */ 4632 bf = nbf; 4633 else 4634 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4635 } 4636 4637 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4638 sc->sc_stats.ast_tx_swretrymax++; 4639 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4640 "%s: max retries: seqno %d\n", 4641 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4642 ath_tx_update_baw(sc, an, atid, bf); 4643 if (!bf->bf_state.bfs_addedbaw) 4644 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4645 "%s: wasn't added: seqno %d\n", 4646 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4647 bf->bf_state.bfs_dobaw = 0; 4648 return 1; 4649 } 4650 4651 ath_tx_set_retry(sc, bf); 4652 sc->sc_stats.ast_tx_swretries++; 4653 bf->bf_next = NULL; /* Just to make sure */ 4654 4655 /* Clear the aggregate state */ 4656 bf->bf_state.bfs_aggr = 0; 4657 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4658 bf->bf_state.bfs_nframes = 1; 4659 4660 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4661 return 0; 4662 } 4663 4664 /* 4665 * error pkt completion for an aggregate destination 4666 */ 4667 static void 4668 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4669 struct ath_tid *tid) 4670 { 4671 struct ieee80211_node *ni = bf_first->bf_node; 4672 struct ath_node *an = ATH_NODE(ni); 4673 struct ath_buf *bf_next, *bf; 4674 ath_bufhead bf_q; 4675 int drops = 0; 4676 struct ieee80211_tx_ampdu *tap; 4677 ath_bufhead bf_cq; 4678 4679 TAILQ_INIT(&bf_q); 4680 TAILQ_INIT(&bf_cq); 4681 4682 /* 4683 * Update rate control - all frames have failed. 4684 * 4685 * XXX use the length in the first frame in the series; 4686 * XXX just so things are consistent for now. 4687 */ 4688 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4689 &bf_first->bf_status.ds_txstat, 4690 bf_first->bf_state.bfs_pktlen, 4691 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4692 4693 ATH_TX_LOCK(sc); 4694 tap = ath_tx_get_tx_tid(an, tid->tid); 4695 sc->sc_stats.ast_tx_aggr_failall++; 4696 4697 /* Retry all subframes */ 4698 bf = bf_first; 4699 while (bf) { 4700 bf_next = bf->bf_next; 4701 bf->bf_next = NULL; /* Remove it from the aggr list */ 4702 sc->sc_stats.ast_tx_aggr_fail++; 4703 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4704 drops++; 4705 bf->bf_next = NULL; 4706 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4707 } 4708 bf = bf_next; 4709 } 4710 4711 /* Prepend all frames to the beginning of the queue */ 4712 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4713 TAILQ_REMOVE(&bf_q, bf, bf_list); 4714 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4715 } 4716 4717 /* 4718 * Schedule the TID to be re-tried. 4719 */ 4720 ath_tx_tid_sched(sc, tid); 4721 4722 /* 4723 * send bar if we dropped any frames 4724 * 4725 * Keep the txq lock held for now, as we need to ensure 4726 * that ni_txseqs[] is consistent (as it's being updated 4727 * in the ifnet TX context or raw TX context.) 
4728 */ 4729 if (drops) { 4730 /* Suspend the TX queue and get ready to send the BAR */ 4731 ath_tx_tid_bar_suspend(sc, tid); 4732 } 4733 4734 /* 4735 * Send BAR if required 4736 */ 4737 if (ath_tx_tid_bar_tx_ready(sc, tid)) 4738 ath_tx_tid_bar_tx(sc, tid); 4739 4740 ATH_TX_UNLOCK(sc); 4741 4742 /* Complete frames which errored out */ 4743 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4744 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4745 ath_tx_default_comp(sc, bf, 0); 4746 } 4747 } 4748 4749 /* 4750 * Handle clean-up of packets from an aggregate list. 4751 * 4752 * There's no need to update the BAW here - the session is being 4753 * torn down. 4754 */ 4755 static void 4756 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 4757 { 4758 struct ath_buf *bf, *bf_next; 4759 struct ieee80211_node *ni = bf_first->bf_node; 4760 struct ath_node *an = ATH_NODE(ni); 4761 int tid = bf_first->bf_state.bfs_tid; 4762 struct ath_tid *atid = &an->an_tid[tid]; 4763 4764 ATH_TX_LOCK(sc); 4765 4766 /* update incomp */ 4767 atid->incomp--; 4768 4769 /* Update the BAW */ 4770 bf = bf_first; 4771 while (bf) { 4772 /* XXX refactor! */ 4773 if (bf->bf_state.bfs_dobaw) { 4774 ath_tx_update_baw(sc, an, atid, bf); 4775 if (!bf->bf_state.bfs_addedbaw) 4776 DPRINTF(sc, ATH_DEBUG_SW_TX, 4777 "%s: wasn't added: seqno %d\n", 4778 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4779 } 4780 bf = bf->bf_next; 4781 } 4782 4783 if (atid->incomp == 0) { 4784 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4785 "%s: TID %d: cleaned up! resume!\n", 4786 __func__, tid); 4787 atid->cleanup_inprogress = 0; 4788 ath_tx_tid_resume(sc, atid); 4789 } 4790 4791 /* Send BAR if required */ 4792 /* XXX why would we send a BAR when transitioning to non-aggregation? */ 4793 /* 4794 * XXX TODO: we should likely just tear down the BAR state here, 4795 * rather than sending a BAR. 4796 */ 4797 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4798 ath_tx_tid_bar_tx(sc, atid); 4799 4800 ATH_TX_UNLOCK(sc); 4801 4802 /* Handle frame completion as individual frames */ 4803 bf = bf_first; 4804 while (bf) { 4805 bf_next = bf->bf_next; 4806 bf->bf_next = NULL; 4807 ath_tx_default_comp(sc, bf, 1); 4808 bf = bf_next; 4809 } 4810 } 4811 4812 /* 4813 * Handle completion of an set of aggregate frames. 4814 * 4815 * Note: the completion handler is the last descriptor in the aggregate, 4816 * not the last descriptor in the first frame. 4817 */ 4818 static void 4819 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4820 int fail) 4821 { 4822 //struct ath_desc *ds = bf->bf_lastds; 4823 struct ieee80211_node *ni = bf_first->bf_node; 4824 struct ath_node *an = ATH_NODE(ni); 4825 int tid = bf_first->bf_state.bfs_tid; 4826 struct ath_tid *atid = &an->an_tid[tid]; 4827 struct ath_tx_status ts; 4828 struct ieee80211_tx_ampdu *tap; 4829 ath_bufhead bf_q; 4830 ath_bufhead bf_cq; 4831 int seq_st, tx_ok; 4832 int hasba, isaggr; 4833 uint32_t ba[2]; 4834 struct ath_buf *bf, *bf_next; 4835 int ba_index; 4836 int drops = 0; 4837 int nframes = 0, nbad = 0, nf; 4838 int pktlen; 4839 /* XXX there's too much on the stack? */ 4840 struct ath_rc_series rc[ATH_RC_NUM]; 4841 int txseq; 4842 4843 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4844 __func__, atid->hwq_depth); 4845 4846 /* 4847 * Take a copy; this may be needed -after- bf_first 4848 * has been completed and freed. 
4849 */ 4850 ts = bf_first->bf_status.ds_txstat; 4851 4852 TAILQ_INIT(&bf_q); 4853 TAILQ_INIT(&bf_cq); 4854 4855 /* The TID state is kept behind the TXQ lock */ 4856 ATH_TX_LOCK(sc); 4857 4858 atid->hwq_depth--; 4859 if (atid->hwq_depth < 0) 4860 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", 4861 __func__, atid->hwq_depth); 4862 4863 /* 4864 * If the TID is filtered, handle completing the filter 4865 * transition before potentially kicking it to the cleanup 4866 * function. 4867 * 4868 * XXX this is duplicate work, ew. 4869 */ 4870 if (atid->isfiltered) 4871 ath_tx_tid_filt_comp_complete(sc, atid); 4872 4873 /* 4874 * Punt cleanup to the relevant function, not our problem now 4875 */ 4876 if (atid->cleanup_inprogress) { 4877 if (atid->isfiltered) 4878 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4879 "%s: isfiltered=1, normal_comp?\n", 4880 __func__); 4881 ATH_TX_UNLOCK(sc); 4882 ath_tx_comp_cleanup_aggr(sc, bf_first); 4883 return; 4884 } 4885 4886 /* 4887 * If the frame is filtered, transition to filtered frame 4888 * mode and add this to the filtered frame list. 4889 * 4890 * XXX TODO: figure out how this interoperates with 4891 * BAR, pause and cleanup states. 4892 */ 4893 if ((ts.ts_status & HAL_TXERR_FILT) || 4894 (ts.ts_status != 0 && atid->isfiltered)) { 4895 if (fail != 0) 4896 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4897 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4898 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4899 4900 /* Remove from BAW */ 4901 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4902 if (bf->bf_state.bfs_addedbaw) 4903 drops++; 4904 if (bf->bf_state.bfs_dobaw) { 4905 ath_tx_update_baw(sc, an, atid, bf); 4906 if (!bf->bf_state.bfs_addedbaw) 4907 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4908 "%s: wasn't added: seqno %d\n", 4909 __func__, 4910 SEQNO(bf->bf_state.bfs_seqno)); 4911 } 4912 bf->bf_state.bfs_dobaw = 0; 4913 } 4914 /* 4915 * If any intermediate frames in the BAW were dropped when 4916 * handling filtering things, send a BAR. 4917 */ 4918 if (drops) 4919 ath_tx_tid_bar_suspend(sc, atid); 4920 4921 /* 4922 * Finish up by sending a BAR if required and freeing 4923 * the frames outside of the TX lock. 4924 */ 4925 goto finish_send_bar; 4926 } 4927 4928 /* 4929 * XXX for now, use the first frame in the aggregate for 4930 * XXX rate control completion; it's at least consistent. 4931 */ 4932 pktlen = bf_first->bf_state.bfs_pktlen; 4933 4934 /* 4935 * Handle errors first! 4936 * 4937 * Here, handle _any_ error as a "exceeded retries" error. 4938 * Later on (when filtered frames are to be specially handled) 4939 * it'll have to be expanded. 4940 */ 4941 #if 0 4942 if (ts.ts_status & HAL_TXERR_XRETRY) { 4943 #endif 4944 if (ts.ts_status != 0) { 4945 ATH_TX_UNLOCK(sc); 4946 ath_tx_comp_aggr_error(sc, bf_first, atid); 4947 return; 4948 } 4949 4950 tap = ath_tx_get_tx_tid(an, tid); 4951 4952 /* 4953 * extract starting sequence and block-ack bitmap 4954 */ 4955 /* XXX endian-ness of seq_st, ba? */ 4956 seq_st = ts.ts_seqnum; 4957 hasba = !! (ts.ts_flags & HAL_TX_BA); 4958 tx_ok = (ts.ts_status == 0); 4959 isaggr = bf_first->bf_state.bfs_aggr; 4960 ba[0] = ts.ts_ba_low; 4961 ba[1] = ts.ts_ba_high; 4962 4963 /* 4964 * Copy the TX completion status and the rate control 4965 * series from the first descriptor, as it may be freed 4966 * before the rate control code can get its grubby fingers 4967 * into things. 
4968 */ 4969 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4970 4971 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4972 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4973 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4974 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4975 isaggr, seq_st, hasba, ba[0], ba[1]); 4976 4977 /* 4978 * The reference driver doesn't do this; it simply ignores 4979 * this check in its entirety. 4980 * 4981 * I've seen this occur when using iperf to send traffic 4982 * out tid 1 - the aggregate frames are all marked as TID 1, 4983 * but the TXSTATUS has TID=0. So, let's just ignore this 4984 * check. 4985 */ 4986 #if 0 4987 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4988 if (tid != ts.ts_tid) { 4989 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4990 __func__, tid, ts.ts_tid); 4991 tx_ok = 0; 4992 } 4993 #endif 4994 4995 /* AR5416 BA bug; this requires an interface reset */ 4996 if (isaggr && tx_ok && (! hasba)) { 4997 device_printf(sc->sc_dev, 4998 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 4999 "seq_st=%d\n", 5000 __func__, hasba, tx_ok, isaggr, seq_st); 5001 /* XXX TODO: schedule an interface reset */ 5002 #ifdef ATH_DEBUG 5003 ath_printtxbuf(sc, bf_first, 5004 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 5005 #endif 5006 } 5007 5008 /* 5009 * Walk the list of frames, figure out which ones were correctly 5010 * sent and which weren't. 5011 */ 5012 bf = bf_first; 5013 nf = bf_first->bf_state.bfs_nframes; 5014 5015 /* bf_first is going to be invalid once this list is walked */ 5016 bf_first = NULL; 5017 5018 /* 5019 * Walk the list of completed frames and determine 5020 * which need to be completed and which need to be 5021 * retransmitted. 5022 * 5023 * For completed frames, the completion functions need 5024 * to be called at the end of this function as the last 5025 * node reference may free the node. 5026 * 5027 * Finally, since the TXQ lock can't be held during the 5028 * completion callback (to avoid lock recursion), 5029 * the completion calls have to be done outside of the 5030 * lock. 5031 */ 5032 while (bf) { 5033 nframes++; 5034 ba_index = ATH_BA_INDEX(seq_st, 5035 SEQNO(bf->bf_state.bfs_seqno)); 5036 bf_next = bf->bf_next; 5037 bf->bf_next = NULL; /* Remove it from the aggr list */ 5038 5039 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5040 "%s: checking bf=%p seqno=%d; ack=%d\n", 5041 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 5042 ATH_BA_ISSET(ba, ba_index)); 5043 5044 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 5045 sc->sc_stats.ast_tx_aggr_ok++; 5046 ath_tx_update_baw(sc, an, atid, bf); 5047 bf->bf_state.bfs_dobaw = 0; 5048 if (!bf->bf_state.bfs_addedbaw) 5049 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5050 "%s: wasn't added: seqno %d\n", 5051 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5052 bf->bf_next = NULL; 5053 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5054 } else { 5055 sc->sc_stats.ast_tx_aggr_fail++; 5056 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 5057 drops++; 5058 bf->bf_next = NULL; 5059 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5060 } 5061 nbad++; 5062 } 5063 bf = bf_next; 5064 } 5065 5066 /* 5067 * Now that the BAW updates have been done, unlock 5068 * 5069 * txseq is grabbed before the lock is released so we 5070 * have a consistent view of what -was- in the BAW. 5071 * Anything after this point will not yet have been 5072 * TXed. 
5073 */ 5074 txseq = tap->txa_start; 5075 ATH_TX_UNLOCK(sc); 5076 5077 if (nframes != nf) 5078 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5079 "%s: num frames seen=%d; bf nframes=%d\n", 5080 __func__, nframes, nf); 5081 5082 /* 5083 * Now we know how many frames were bad, call the rate 5084 * control code. 5085 */ 5086 if (fail == 0) 5087 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 5088 nbad); 5089 5090 /* 5091 * send bar if we dropped any frames 5092 */ 5093 if (drops) { 5094 /* Suspend the TX queue and get ready to send the BAR */ 5095 ATH_TX_LOCK(sc); 5096 ath_tx_tid_bar_suspend(sc, atid); 5097 ATH_TX_UNLOCK(sc); 5098 } 5099 5100 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5101 "%s: txa_start now %d\n", __func__, tap->txa_start); 5102 5103 ATH_TX_LOCK(sc); 5104 5105 /* Prepend all frames to the beginning of the queue */ 5106 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5107 TAILQ_REMOVE(&bf_q, bf, bf_list); 5108 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5109 } 5110 5111 /* 5112 * Reschedule to grab some further frames. 5113 */ 5114 ath_tx_tid_sched(sc, atid); 5115 5116 /* 5117 * If the queue is filtered, re-schedule as required. 5118 * 5119 * This is required as there may be a subsequent TX descriptor 5120 * for this end-node that has CLRDMASK set, so it's quite possible 5121 * that a filtered frame will be followed by a non-filtered 5122 * (complete or otherwise) frame. 5123 * 5124 * XXX should we do this before we complete the frame? 5125 */ 5126 if (atid->isfiltered) 5127 ath_tx_tid_filt_comp_complete(sc, atid); 5128 5129 finish_send_bar: 5130 5131 /* 5132 * Send BAR if required 5133 */ 5134 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5135 ath_tx_tid_bar_tx(sc, atid); 5136 5137 ATH_TX_UNLOCK(sc); 5138 5139 /* Do deferred completion */ 5140 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5141 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5142 ath_tx_default_comp(sc, bf, 0); 5143 } 5144 } 5145 5146 /* 5147 * Handle completion of unaggregated frames in an ADDBA 5148 * session. 5149 * 5150 * Fail is set to 1 if the entry is being freed via a call to 5151 * ath_tx_draintxq(). 5152 */ 5153 static void 5154 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5155 { 5156 struct ieee80211_node *ni = bf->bf_node; 5157 struct ath_node *an = ATH_NODE(ni); 5158 int tid = bf->bf_state.bfs_tid; 5159 struct ath_tid *atid = &an->an_tid[tid]; 5160 struct ath_tx_status ts; 5161 int drops = 0; 5162 5163 /* 5164 * Take a copy of this; filtering/cloning the frame may free the 5165 * bf pointer. 5166 */ 5167 ts = bf->bf_status.ds_txstat; 5168 5169 /* 5170 * Update rate control status here, before we possibly 5171 * punt to retry or cleanup. 5172 * 5173 * Do it outside of the TXQ lock. 5174 */ 5175 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5176 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5177 &bf->bf_status.ds_txstat, 5178 bf->bf_state.bfs_pktlen, 5179 1, (ts.ts_status == 0) ? 0 : 1); 5180 5181 /* 5182 * This is called early so atid->hwq_depth can be tracked. 5183 * This unfortunately means that it's released and regrabbed 5184 * during retry and cleanup. That's rather inefficient. 
5185 */ 5186 ATH_TX_LOCK(sc); 5187 5188 if (tid == IEEE80211_NONQOS_TID) 5189 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5190 5191 DPRINTF(sc, ATH_DEBUG_SW_TX, 5192 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5193 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5194 SEQNO(bf->bf_state.bfs_seqno)); 5195 5196 atid->hwq_depth--; 5197 if (atid->hwq_depth < 0) 5198 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5199 __func__, atid->hwq_depth); 5200 5201 /* 5202 * If the TID is filtered, handle completing the filter 5203 * transition before potentially kicking it to the cleanup 5204 * function. 5205 */ 5206 if (atid->isfiltered) 5207 ath_tx_tid_filt_comp_complete(sc, atid); 5208 5209 /* 5210 * If a cleanup is in progress, punt to comp_cleanup; 5211 * rather than handling it here. It's thus their 5212 * responsibility to clean up, call the completion 5213 * function in net80211, etc. 5214 */ 5215 if (atid->cleanup_inprogress) { 5216 if (atid->isfiltered) 5217 DPRINTF(sc, ATH_DEBUG_SW_TX, 5218 "%s: isfiltered=1, normal_comp?\n", 5219 __func__); 5220 ATH_TX_UNLOCK(sc); 5221 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5222 __func__); 5223 ath_tx_comp_cleanup_unaggr(sc, bf); 5224 return; 5225 } 5226 5227 /* 5228 * XXX TODO: how does cleanup, BAR and filtered frame handling 5229 * overlap? 5230 * 5231 * If the frame is filtered OR if it's any failure but 5232 * the TID is filtered, the frame must be added to the 5233 * filtered frame list. 5234 * 5235 * However - a busy buffer can't be added to the filtered 5236 * list as it will end up being recycled without having 5237 * been made available for the hardware. 5238 */ 5239 if ((ts.ts_status & HAL_TXERR_FILT) || 5240 (ts.ts_status != 0 && atid->isfiltered)) { 5241 int freeframe; 5242 5243 if (fail != 0) 5244 DPRINTF(sc, ATH_DEBUG_SW_TX, 5245 "%s: isfiltered=1, fail=%d\n", 5246 __func__, fail); 5247 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5248 /* 5249 * If freeframe=0 then bf is no longer ours; don't 5250 * touch it. 5251 */ 5252 if (freeframe) { 5253 /* Remove from BAW */ 5254 if (bf->bf_state.bfs_addedbaw) 5255 drops++; 5256 if (bf->bf_state.bfs_dobaw) { 5257 ath_tx_update_baw(sc, an, atid, bf); 5258 if (!bf->bf_state.bfs_addedbaw) 5259 DPRINTF(sc, ATH_DEBUG_SW_TX, 5260 "%s: wasn't added: seqno %d\n", 5261 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5262 } 5263 bf->bf_state.bfs_dobaw = 0; 5264 } 5265 5266 /* 5267 * If the frame couldn't be filtered, treat it as a drop and 5268 * prepare to send a BAR. 5269 */ 5270 if (freeframe && drops) 5271 ath_tx_tid_bar_suspend(sc, atid); 5272 5273 /* 5274 * Send BAR if required 5275 */ 5276 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5277 ath_tx_tid_bar_tx(sc, atid); 5278 5279 ATH_TX_UNLOCK(sc); 5280 /* 5281 * If freeframe is set, then the frame couldn't be 5282 * cloned and bf is still valid. Just complete/free it. 5283 */ 5284 if (freeframe) 5285 ath_tx_default_comp(sc, bf, fail); 5286 5287 return; 5288 } 5289 /* 5290 * Don't bother with the retry check if all frames 5291 * are being failed (eg during queue deletion.) 5292 */ 5293 #if 0 5294 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5295 #endif 5296 if (fail == 0 && ts.ts_status != 0) { 5297 ATH_TX_UNLOCK(sc); 5298 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5299 __func__); 5300 ath_tx_aggr_retry_unaggr(sc, bf); 5301 return; 5302 } 5303 5304 /* Success? 
5305 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5306 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5307 	if (bf->bf_state.bfs_dobaw) {
5308 		ath_tx_update_baw(sc, an, atid, bf);
5309 		bf->bf_state.bfs_dobaw = 0;
5310 		if (!bf->bf_state.bfs_addedbaw)
5311 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5312 			    "%s: wasn't added: seqno %d\n",
5313 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5314 	}
5315
5316 	/*
5317 	 * If the queue is filtered, re-schedule as required.
5318 	 *
5319 	 * This is required as there may be a subsequent TX descriptor
5320 	 * for this end-node that has CLRDMASK set, so it's quite possible
5321 	 * that a filtered frame will be followed by a non-filtered
5322 	 * (complete or otherwise) frame.
5323 	 *
5324 	 * XXX should we do this before we complete the frame?
5325 	 */
5326 	if (atid->isfiltered)
5327 		ath_tx_tid_filt_comp_complete(sc, atid);
5328
5329 	/*
5330 	 * Send BAR if required
5331 	 */
5332 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5333 		ath_tx_tid_bar_tx(sc, atid);
5334
5335 	ATH_TX_UNLOCK(sc);
5336
5337 	ath_tx_default_comp(sc, bf, fail);
5338 	/* bf is freed at this point */
5339 }
5340
5341 void
5342 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5343 {
5344 	if (bf->bf_state.bfs_aggr)
5345 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5346 	else
5347 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5348 }
5349
5350 /*
5351  * Schedule some packets from the given node/TID to the hardware.
5352  *
5353  * This is the aggregate version.
5354  */
5355 void
5356 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5357     struct ath_tid *tid)
5358 {
5359 	struct ath_buf *bf;
5360 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5361 	struct ieee80211_tx_ampdu *tap;
5362 	ATH_AGGR_STATUS status;
5363 	ath_bufhead bf_q;
5364
5365 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5366 	ATH_TX_LOCK_ASSERT(sc);
5367
5368 	/*
5369 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5370 	 * ensure we only leak one.
5371 	 */
5372
5373 	tap = ath_tx_get_tx_tid(an, tid->tid);
5374
5375 	if (tid->tid == IEEE80211_NONQOS_TID)
5376 		DPRINTF(sc, ATH_DEBUG_SW_TX,
5377 		    "%s: called for TID=NONQOS_TID?\n", __func__);
5378
5379 	for (;;) {
5380 		status = ATH_AGGR_DONE;
5381
5382 		/*
5383 		 * If the upper layer has paused the TID, don't
5384 		 * queue any further packets.
5385 		 *
5386 		 * This can also occur from the completion task because
5387 		 * of packet loss; but as it's serialised with this code,
5388 		 * it won't "appear" halfway through queuing packets.
5389 		 */
5390 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5391 			break;
5392
5393 		bf = ATH_TID_FIRST(tid);
5394 		if (bf == NULL) {
5395 			break;
5396 		}
5397
5398 		/*
5399 		 * If the packet doesn't fall within the BAW (eg a NULL
5400 		 * data frame), schedule it directly; continue.
5401 		 */
5402 		if (! bf->bf_state.bfs_dobaw) {
5403 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5404 			    "%s: non-baw packet\n",
5405 			    __func__);
5406 			ATH_TID_REMOVE(tid, bf, bf_list);
5407
5408 			if (bf->bf_state.bfs_nframes > 1)
5409 				DPRINTF(sc, ATH_DEBUG_SW_TX,
5410 				    "%s: aggr=%d, nframes=%d\n",
5411 				    __func__,
5412 				    bf->bf_state.bfs_aggr,
5413 				    bf->bf_state.bfs_nframes);
5414
5415 			/*
5416 			 * This shouldn't happen - such frames shouldn't
5417 			 * ever have been queued as an aggregate in the
5418 			 * first place. However, make sure the fields
5419 			 * are set up correctly, just to be safe.
5420 */ 5421 bf->bf_state.bfs_aggr = 0; 5422 bf->bf_state.bfs_nframes = 1; 5423 5424 /* Update CLRDMASK just before this frame is queued */ 5425 ath_tx_update_clrdmask(sc, tid, bf); 5426 5427 ath_tx_do_ratelookup(sc, bf); 5428 ath_tx_calc_duration(sc, bf); 5429 ath_tx_calc_protection(sc, bf); 5430 ath_tx_set_rtscts(sc, bf); 5431 ath_tx_rate_fill_rcflags(sc, bf); 5432 ath_tx_setds(sc, bf); 5433 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5434 5435 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 5436 5437 /* Queue the packet; continue */ 5438 goto queuepkt; 5439 } 5440 5441 TAILQ_INIT(&bf_q); 5442 5443 /* 5444 * Do a rate control lookup on the first frame in the 5445 * list. The rate control code needs that to occur 5446 * before it can determine whether to TX. 5447 * It's inaccurate because the rate control code doesn't 5448 * really "do" aggregate lookups, so it only considers 5449 * the size of the first frame. 5450 */ 5451 ath_tx_do_ratelookup(sc, bf); 5452 bf->bf_state.bfs_rc[3].rix = 0; 5453 bf->bf_state.bfs_rc[3].tries = 0; 5454 5455 ath_tx_calc_duration(sc, bf); 5456 ath_tx_calc_protection(sc, bf); 5457 5458 ath_tx_set_rtscts(sc, bf); 5459 ath_tx_rate_fill_rcflags(sc, bf); 5460 5461 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 5462 5463 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5464 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 5465 5466 /* 5467 * No frames to be picked up - out of BAW 5468 */ 5469 if (TAILQ_EMPTY(&bf_q)) 5470 break; 5471 5472 /* 5473 * This assumes that the descriptor list in the ath_bufhead 5474 * are already linked together via bf_next pointers. 5475 */ 5476 bf = TAILQ_FIRST(&bf_q); 5477 5478 if (status == ATH_AGGR_8K_LIMITED) 5479 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 5480 5481 /* 5482 * If it's the only frame send as non-aggregate 5483 * assume that ath_tx_form_aggr() has checked 5484 * whether it's in the BAW and added it appropriately. 5485 */ 5486 if (bf->bf_state.bfs_nframes == 1) { 5487 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5488 "%s: single-frame aggregate\n", __func__); 5489 5490 /* Update CLRDMASK just before this frame is queued */ 5491 ath_tx_update_clrdmask(sc, tid, bf); 5492 5493 bf->bf_state.bfs_aggr = 0; 5494 bf->bf_state.bfs_ndelim = 0; 5495 ath_tx_setds(sc, bf); 5496 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5497 if (status == ATH_AGGR_BAW_CLOSED) 5498 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 5499 else 5500 sc->sc_aggr_stats.aggr_single_pkt++; 5501 } else { 5502 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5503 "%s: multi-frame aggregate: %d frames, " 5504 "length %d\n", 5505 __func__, bf->bf_state.bfs_nframes, 5506 bf->bf_state.bfs_al); 5507 bf->bf_state.bfs_aggr = 1; 5508 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 5509 sc->sc_aggr_stats.aggr_aggr_pkt++; 5510 5511 /* Update CLRDMASK just before this frame is queued */ 5512 ath_tx_update_clrdmask(sc, tid, bf); 5513 5514 /* 5515 * Calculate the duration/protection as required. 5516 */ 5517 ath_tx_calc_duration(sc, bf); 5518 ath_tx_calc_protection(sc, bf); 5519 5520 /* 5521 * Update the rate and rtscts information based on the 5522 * rate decision made by the rate control code; 5523 * the first frame in the aggregate needs it. 5524 */ 5525 ath_tx_set_rtscts(sc, bf); 5526 5527 /* 5528 * Setup the relevant descriptor fields 5529 * for aggregation. The first descriptor 5530 * already points to the rest in the chain. 
5531 */ 5532 ath_tx_setds_11n(sc, bf); 5533 5534 } 5535 queuepkt: 5536 /* Set completion handler, multi-frame aggregate or not */ 5537 bf->bf_comp = ath_tx_aggr_comp; 5538 5539 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 5540 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); 5541 5542 /* 5543 * Update leak count and frame config if were leaking frames. 5544 * 5545 * XXX TODO: it should update all frames in an aggregate 5546 * correctly! 5547 */ 5548 ath_tx_leak_count_update(sc, tid, bf); 5549 5550 /* Punt to txq */ 5551 ath_tx_handoff(sc, txq, bf); 5552 5553 /* Track outstanding buffer count to hardware */ 5554 /* aggregates are "one" buffer */ 5555 tid->hwq_depth++; 5556 5557 /* 5558 * Break out if ath_tx_form_aggr() indicated 5559 * there can't be any further progress (eg BAW is full.) 5560 * Checking for an empty txq is done above. 5561 * 5562 * XXX locking on txq here? 5563 */ 5564 /* XXX TXQ locking */ 5565 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || 5566 (status == ATH_AGGR_BAW_CLOSED || 5567 status == ATH_AGGR_LEAK_CLOSED)) 5568 break; 5569 } 5570 } 5571 5572 /* 5573 * Schedule some packets from the given node/TID to the hardware. 5574 * 5575 * XXX TODO: this routine doesn't enforce the maximum TXQ depth. 5576 * It just dumps frames into the TXQ. We should limit how deep 5577 * the transmit queue can grow for frames dispatched to the given 5578 * TXQ. 5579 * 5580 * To avoid locking issues, either we need to own the TXQ lock 5581 * at this point, or we need to pass in the maximum frame count 5582 * from the caller. 5583 */ 5584 void 5585 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 5586 struct ath_tid *tid) 5587 { 5588 struct ath_buf *bf; 5589 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5590 5591 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 5592 __func__, an, tid->tid); 5593 5594 ATH_TX_LOCK_ASSERT(sc); 5595 5596 /* Check - is AMPDU pending or running? then print out something */ 5597 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5598 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5599 __func__, tid->tid); 5600 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5601 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5602 __func__, tid->tid); 5603 5604 for (;;) { 5605 5606 /* 5607 * If the upper layers have paused the TID, don't 5608 * queue any further packets. 5609 * 5610 * XXX if we are leaking frames, make sure we decrement 5611 * that counter _and_ we continue here. 5612 */ 5613 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5614 break; 5615 5616 bf = ATH_TID_FIRST(tid); 5617 if (bf == NULL) { 5618 break; 5619 } 5620 5621 ATH_TID_REMOVE(tid, bf, bf_list); 5622 5623 /* Sanity check! */ 5624 if (tid->tid != bf->bf_state.bfs_tid) { 5625 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5626 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5627 tid->tid); 5628 } 5629 /* Normal completion handler */ 5630 bf->bf_comp = ath_tx_normal_comp; 5631 5632 /* 5633 * Override this for now, until the non-aggregate 5634 * completion handler correctly handles software retransmits. 
/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid, *next, *last;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For non-EDMA chips, aggr frames that have been built are
	 * in axq_aggr_depth, whether they've been scheduled or not.
	 * There's no FIFO, so txq->axq_depth is what's been scheduled
	 * to the hardware.
	 *
	 * For EDMA chips, we do it in two stages.  The existing code
	 * builds a list of frames to go to the hardware and the EDMA
	 * code turns it into a single entry to push into the FIFO.
	 * That way we don't take up one packet per FIFO slot.
	 * We do push one aggregate per FIFO slot though, just to keep
	 * things simple.
	 *
	 * The FIFO depth is what's in the hardware; the txq->axq_depth
	 * is what's been scheduled to the FIFO.
	 *
	 * fifo.axq_depth is the number of frames (or aggregates) pushed
	 * into the EDMA FIFO.  For multi-frame lists, this is the number
	 * of frames pushed in.
	 * axq_fifo_depth is the number of FIFO slots currently busy.
	 */

	/* For EDMA and non-EDMA, check built/scheduled against aggr limit */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	/*
	 * For non-EDMA chips, axq_depth is "what's been scheduled to
	 * the hardware list".  For EDMA it's "what's been built for
	 * the hardware", and fifo.axq_depth is how many frames have
	 * already been dispatched to the hardware.
	 */
	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
		/*
		 * Suspend paused queues here; they'll be resumed
		 * once the addba completes or times out.
		 */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
		    __func__, tid->tid, tid->paused);
		ath_tx_tid_unsched(sc, tid);
		/*
		 * This node may be in power-save and we're leaking
		 * a frame; be careful.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
			goto loop_done;
		}
		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
		else
			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

		/* Not empty? Re-schedule */
		if (tid->axq_depth != 0)
			ath_tx_tid_sched(sc, tid);

		/*
		 * Give the software queue time to aggregate more
		 * packets.  If we aren't running aggregation then
		 * we should still limit the hardware queue depth.
		 */
		/* XXX TXQ locking */
		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
			break;
		}
		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
			break;
		}
	loop_done:
		/*
		 * If this was the last entry on the original list, stop.
		 * Otherwise nodes that have been rescheduled onto the end
		 * of the TID FIFO list will just keep being rescheduled.
		 *
		 * XXX What should we do about nodes that were paused
		 * but are pending a leaking frame in response to a ps-poll?
		 * They'll be put at the front of the list; so they'll
		 * prematurely trigger this condition! Ew.
		 */
		if (tid == last)
			break;
	}
}
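/*
 * The two depth checks above can be read as a single "is there room"
 * predicate.  An illustrative sketch (this helper doesn't exist in
 * the driver):
 */
#if 0
static int
ath_txq_has_room(struct ath_softc *sc, struct ath_txq *txq)
{

	/* Aggregates built/scheduled: same test for EDMA and non-EDMA */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr)
		return (0);
	/* Non-aggregate frames: scheduled plus FIFO-dispatched */
	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr)
		return (0);
	return (1);
}
#endif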
/*
 * TX addba handling
 */

/*
 * Return the net80211 TID struct pointer, or NULL for none.
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not running */

	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not pending */

	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
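/*
 * Illustrative only: the two predicates above share the same
 * NULL-handling shape for the non-QoS TID, and the double negation
 * collapses the flag bit into a strict 0/1 result.  A hypothetical
 * generic form (not part of the driver):
 */
#if 0
static int
ath_tx_ampdu_flag_example(struct ath_node *an, int tid, int flag)
{
	struct ieee80211_tx_ampdu *tap;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* non-QoS TID; no ADDBA state to inspect */
	return !! (tap->txa_flags & flag);
}
#endif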
5861 * 5862 * In the past, these packets would be "paused" (which still works 5863 * fine, as they're being scheduled to the driver in the same 5864 * serialised method which is calling the addba request routine) 5865 * and when the aggregation session begins, they'll be dequeued 5866 * as aggregate packets and added to the BAW. However, now there's 5867 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5868 * packets. Thus they never get included in the BAW tracking and 5869 * this can cause the initial burst of packets after the addba 5870 * negotiation to "hang", as they quickly fall outside the BAW. 5871 * 5872 * The "eventual" solution should be to tag these packets with 5873 * dobaw. Although net80211 has given us a sequence number, 5874 * it'll be "after" the left edge of the BAW and thus it'll 5875 * fall within it. 5876 */ 5877 ATH_TX_LOCK(sc); 5878 /* 5879 * This is a bit annoying. Until net80211 HT code inherits some 5880 * (any) locking, we may have this called in parallel BUT only 5881 * one response/timeout will be called. Grr. 5882 */ 5883 if (atid->addba_tx_pending == 0) { 5884 ath_tx_tid_pause(sc, atid); 5885 atid->addba_tx_pending = 1; 5886 } 5887 ATH_TX_UNLOCK(sc); 5888 5889 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5890 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5891 __func__, 5892 ni->ni_macaddr, 5893 ":", 5894 dialogtoken, baparamset, batimeout); 5895 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5896 "%s: txa_start=%d, ni_txseqs=%d\n", 5897 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5898 5899 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5900 batimeout); 5901 } 5902 5903 /* 5904 * Handle an ADDBA response. 5905 * 5906 * We unpause the queue so TX'ing can resume. 5907 * 5908 * Any packets TX'ed from this point should be "aggregate" (whether 5909 * aggregate or not) so the BAW is updated. 5910 * 5911 * Note! net80211 keeps self-assigning sequence numbers until 5912 * ampdu is negotiated. This means the initially-negotiated BAW left 5913 * edge won't match the ni->ni_txseq. 5914 * 5915 * So, being very dirty, the BAW left edge is "slid" here to match 5916 * ni->ni_txseq. 5917 * 5918 * What likely SHOULD happen is that all packets subsequent to the 5919 * addba request should be tagged as aggregate and queued as non-aggregate 5920 * frames; thus updating the BAW. For now though, I'll just slide the 5921 * window. 5922 */ 5923 int 5924 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5925 int status, int code, int batimeout) 5926 { 5927 struct ath_softc *sc = ni->ni_ic->ic_softc; 5928 int tid = tap->txa_tid; 5929 struct ath_node *an = ATH_NODE(ni); 5930 struct ath_tid *atid = &an->an_tid[tid]; 5931 int r; 5932 5933 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5934 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, 5935 ni->ni_macaddr, 5936 ":", 5937 status, code, batimeout); 5938 5939 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5940 "%s: txa_start=%d, ni_txseqs=%d\n", 5941 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5942 5943 /* 5944 * Call this first, so the interface flags get updated 5945 * before the TID is unpaused. Otherwise a race condition 5946 * exists where the unpaused TID still doesn't yet have 5947 * IEEE80211_AGGR_RUNNING set. 5948 */ 5949 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5950 5951 ATH_TX_LOCK(sc); 5952 atid->addba_tx_pending = 0; 5953 /* 5954 * XXX dirty! 5955 * Slide the BAW left edge to wherever net80211 left it for us. 5956 * Read above for more information. 
5957 */ 5958 tap->txa_start = ni->ni_txseqs[tid]; 5959 ath_tx_tid_resume(sc, atid); 5960 ATH_TX_UNLOCK(sc); 5961 return r; 5962 } 5963 5964 5965 /* 5966 * Stop ADDBA on a queue. 5967 * 5968 * This can be called whilst BAR TX is currently active on the queue, 5969 * so make sure this is unblocked before continuing. 5970 */ 5971 void 5972 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5973 { 5974 struct ath_softc *sc = ni->ni_ic->ic_softc; 5975 int tid = tap->txa_tid; 5976 struct ath_node *an = ATH_NODE(ni); 5977 struct ath_tid *atid = &an->an_tid[tid]; 5978 ath_bufhead bf_cq; 5979 struct ath_buf *bf; 5980 5981 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", 5982 __func__, 5983 ni->ni_macaddr, 5984 ":"); 5985 5986 /* 5987 * Pause TID traffic early, so there aren't any races 5988 * Unblock the pending BAR held traffic, if it's currently paused. 5989 */ 5990 ATH_TX_LOCK(sc); 5991 ath_tx_tid_pause(sc, atid); 5992 if (atid->bar_wait) { 5993 /* 5994 * bar_unsuspend() expects bar_tx == 1, as it should be 5995 * called from the TX completion path. This quietens 5996 * the warning. It's cleared for us anyway. 5997 */ 5998 atid->bar_tx = 1; 5999 ath_tx_tid_bar_unsuspend(sc, atid); 6000 } 6001 ATH_TX_UNLOCK(sc); 6002 6003 /* There's no need to hold the TXQ lock here */ 6004 sc->sc_addba_stop(ni, tap); 6005 6006 /* 6007 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 6008 * it'll set the cleanup flag, and it'll be unpaused once 6009 * things have been cleaned up. 6010 */ 6011 TAILQ_INIT(&bf_cq); 6012 ATH_TX_LOCK(sc); 6013 6014 /* 6015 * In case there's a followup call to this, only call it 6016 * if we don't have a cleanup in progress. 6017 * 6018 * Since we've paused the queue above, we need to make 6019 * sure we unpause if there's already a cleanup in 6020 * progress - it means something else is also doing 6021 * this stuff, so we don't need to also keep it paused. 6022 */ 6023 if (atid->cleanup_inprogress) { 6024 ath_tx_tid_resume(sc, atid); 6025 } else { 6026 ath_tx_tid_cleanup(sc, an, tid, &bf_cq); 6027 /* 6028 * Unpause the TID if no cleanup is required. 6029 */ 6030 if (! atid->cleanup_inprogress) 6031 ath_tx_tid_resume(sc, atid); 6032 } 6033 ATH_TX_UNLOCK(sc); 6034 6035 /* Handle completing frames and fail them */ 6036 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 6037 TAILQ_REMOVE(&bf_cq, bf, bf_list); 6038 ath_tx_default_comp(sc, bf, 1); 6039 } 6040 6041 } 6042 6043 /* 6044 * Handle a node reassociation. 6045 * 6046 * We may have a bunch of frames queued to the hardware; those need 6047 * to be marked as cleanup. 6048 */ 6049 void 6050 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) 6051 { 6052 struct ath_tid *tid; 6053 int i; 6054 ath_bufhead bf_cq; 6055 struct ath_buf *bf; 6056 6057 TAILQ_INIT(&bf_cq); 6058 6059 ATH_TX_UNLOCK_ASSERT(sc); 6060 6061 ATH_TX_LOCK(sc); 6062 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 6063 tid = &an->an_tid[i]; 6064 if (tid->hwq_depth == 0) 6065 continue; 6066 DPRINTF(sc, ATH_DEBUG_NODE, 6067 "%s: %6D: TID %d: cleaning up TID\n", 6068 __func__, 6069 an->an_node.ni_macaddr, 6070 ":", 6071 i); 6072 /* 6073 * In case there's a followup call to this, only call it 6074 * if we don't have a cleanup in progress. 6075 */ 6076 if (! tid->cleanup_inprogress) { 6077 ath_tx_tid_pause(sc, tid); 6078 ath_tx_tid_cleanup(sc, an, i, &bf_cq); 6079 /* 6080 * Unpause the TID if no cleanup is required. 6081 */ 6082 if (! 
/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things up when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly
	 * XXX what the hell we want!
	 *
	 * XXX So for now, how about we do this inside the TX lock and
	 * XXX just correct it afterwards?  The below condition should
	 * XXX never happen and if it does I need to fix all kinds of
	 * XXX things.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch?  (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; this reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}
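/*
 * The hard-coded BAR retry ceiling tested in ath_bar_response()
 * above could be a named constant; an illustrative sketch with a
 * hypothetical name (not part of the driver):
 */
#if 0
#define	ATH_BAR_MAX_ATTEMPTS	50	/* replaces the literal 50 above */
#endif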
/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}
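/*
 * Example usage (illustrative only) of ath_tx_node_is_asleep(): it
 * must be called with the TX lock held, e.g. when deciding whether
 * to leave a frame on the software queue for a power-save node:
 */
#if 0
static void
ath_tx_ps_example(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK(sc);
	if (ath_tx_node_is_asleep(sc, an)) {
		/* Node is asleep; leave frames on the software queue */
	}
	ATH_TX_UNLOCK(sc);
}
#endif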
/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* Bail if the node is already awake */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
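/*
 * Illustrative sketch (not from this file): attach-time code is
 * expected to choose between this legacy setup and an EDMA variant
 * based on the chip capabilities, along these lines.  The
 * ath_xmit_setup_edma() counterpart is assumed to be defined
 * elsewhere in the driver.
 */
#if 0
	/* e.g. from attach-time setup code: */
	if (ath_hal_hasedma(sc->sc_ah))
		ath_xmit_setup_edma(sc);	/* EDMA variant, defined elsewhere */
	else
		ath_xmit_setup_legacy(sc);
#endif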