1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 16 * redistribution must be conditioned upon including a substantially 17 * similar Disclaimer requirement for further binary redistribution. 18 * 19 * NO WARRANTY 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGES. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * Driver for the Atheros Wireless LAN controller. 38 * 39 * This software is derived from work of Atsushi Onoe; his contribution 40 * is greatly appreciated. 41 */ 42 43 #include "opt_inet.h" 44 #include "opt_ath.h" 45 #include "opt_wlan.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/sysctl.h> 50 #include <sys/mbuf.h> 51 #include <sys/malloc.h> 52 #include <sys/lock.h> 53 #include <sys/mutex.h> 54 #include <sys/kernel.h> 55 #include <sys/socket.h> 56 #include <sys/sockio.h> 57 #include <sys/errno.h> 58 #include <sys/callout.h> 59 #include <sys/bus.h> 60 #include <sys/endian.h> 61 #include <sys/kthread.h> 62 #include <sys/taskqueue.h> 63 #include <sys/priv.h> 64 #include <sys/ktr.h> 65 66 #include <machine/bus.h> 67 68 #include <net/if.h> 69 #include <net/if_var.h> 70 #include <net/if_dl.h> 71 #include <net/if_media.h> 72 #include <net/if_types.h> 73 #include <net/if_arp.h> 74 #include <net/ethernet.h> 75 #include <net/if_llc.h> 76 77 #include <net80211/ieee80211_var.h> 78 #include <net80211/ieee80211_regdomain.h> 79 #ifdef IEEE80211_SUPPORT_SUPERG 80 #include <net80211/ieee80211_superg.h> 81 #endif 82 #ifdef IEEE80211_SUPPORT_TDMA 83 #include <net80211/ieee80211_tdma.h> 84 #endif 85 #include <net80211/ieee80211_ht.h> 86 87 #include <net/bpf.h> 88 89 #ifdef INET 90 #include <netinet/in.h> 91 #include <netinet/if_ether.h> 92 #endif 93 94 #include <dev/ath/if_athvar.h> 95 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 96 #include <dev/ath/ath_hal/ah_diagcodes.h> 97 98 #include <dev/ath/if_ath_debug.h> 99 100 #ifdef ATH_TX99_DIAG 101 #include <dev/ath/ath_tx99/ath_tx99.h> 102 #endif 103 104 #include <dev/ath/if_ath_misc.h> 105 #include <dev/ath/if_ath_tx.h> 106 #include <dev/ath/if_ath_tx_ht.h> 107 108 #ifdef ATH_DEBUG_ALQ 109 #include <dev/ath/if_ath_alq.h> 110 #endif 111 112 /* 113 * How many retries to perform in software 114 */ 115 #define SWMAX_RETRIES 10 116 117 /* 118 * What queue to throw the non-QoS TID traffic into 119 */ 120 #define ATH_NONQOS_TID_AC WME_AC_VO 121 122 #if 0 123 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an); 124 #endif 125 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, 126 int tid); 127 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, 128 int tid); 129 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, 130 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); 131 static int ath_tx_action_frame_override_queue(struct ath_softc *sc, 132 struct ieee80211_node *ni, struct mbuf *m0, int *tid); 133 static struct ath_buf * 134 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 135 struct ath_tid *tid, struct ath_buf *bf); 136 137 #ifdef ATH_DEBUG_ALQ 138 void 139 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first) 140 { 141 struct ath_buf *bf; 142 int i, n; 143 const char *ds; 144 145 /* XXX we should skip out early if debugging isn't enabled! */ 146 bf = bf_first; 147 148 while (bf != NULL) { 149 /* XXX should ensure bf_nseg > 0! */ 150 if (bf->bf_nseg == 0) 151 break; 152 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; 153 for (i = 0, ds = (const char *) bf->bf_desc; 154 i < n; 155 i++, ds += sc->sc_tx_desclen) { 156 if_ath_alq_post(&sc->sc_alq, 157 ATH_ALQ_EDMA_TXDESC, 158 sc->sc_tx_desclen, 159 ds); 160 } 161 bf = bf->bf_next; 162 } 163 } 164 #endif /* ATH_DEBUG_ALQ */ 165 166 /* 167 * Whether to use the 11n rate scenario functions or not 168 */ 169 static inline int 170 ath_tx_is_11n(struct ath_softc *sc) 171 { 172 return ((sc->sc_ah->ah_magic == 0x20065416) || 173 (sc->sc_ah->ah_magic == 0x19741014)); 174 } 175 176 /* 177 * Obtain the current TID from the given frame. 178 * 179 * Non-QoS frames get mapped to a TID so frames consistently 180 * go on a sensible queue. 181 */ 182 static int 183 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) 184 { 185 const struct ieee80211_frame *wh; 186 187 wh = mtod(m0, const struct ieee80211_frame *); 188 189 /* Non-QoS: map frame to a TID queue for software queueing */ 190 if (! IEEE80211_QOS_HAS_SEQ(wh)) 191 return (WME_AC_TO_TID(M_WME_GETAC(m0))); 192 193 /* QoS - fetch the TID from the header, ignore mbuf WME */ 194 return (ieee80211_gettid(wh)); 195 } 196 197 static void 198 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) 199 { 200 struct ieee80211_frame *wh; 201 202 wh = mtod(bf->bf_m, struct ieee80211_frame *); 203 /* Only update/resync if needed */ 204 if (bf->bf_state.bfs_isretried == 0) { 205 wh->i_fc[1] |= IEEE80211_FC1_RETRY; 206 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 207 BUS_DMASYNC_PREWRITE); 208 } 209 bf->bf_state.bfs_isretried = 1; 210 bf->bf_state.bfs_retries ++; 211 } 212 213 /* 214 * Determine what the correct AC queue for the given frame 215 * should be. 216 * 217 * For QoS frames, obey the TID. That way things like 218 * management frames that are related to a given TID 219 * are thus serialised with the rest of the TID traffic, 220 * regardless of net80211 overriding priority. 221 * 222 * For non-QoS frames, return the mbuf WMI priority. 223 * 224 * This has implications that higher priority non-QoS traffic 225 * may end up being scheduled before other non-QoS traffic, 226 * leading to out-of-sequence packets being emitted. 227 * 228 * (It'd be nice to log/count this so we can see if it 229 * really is a problem.) 230 * 231 * TODO: maybe we should throw multicast traffic, QoS or 232 * otherwise, into a separate TX queue? 233 */ 234 static int 235 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) 236 { 237 const struct ieee80211_frame *wh; 238 239 wh = mtod(m0, const struct ieee80211_frame *); 240 241 /* 242 * QoS data frame (sequence number or otherwise) - 243 * return hardware queue mapping for the underlying 244 * TID. 245 */ 246 if (IEEE80211_QOS_HAS_SEQ(wh)) 247 return TID_TO_WME_AC(ieee80211_gettid(wh)); 248 249 /* 250 * Otherwise - return mbuf QoS pri. 251 */ 252 return (M_WME_GETAC(m0)); 253 } 254 255 void 256 ath_txfrag_cleanup(struct ath_softc *sc, 257 ath_bufhead *frags, struct ieee80211_node *ni) 258 { 259 struct ath_buf *bf, *next; 260 261 ATH_TXBUF_LOCK_ASSERT(sc); 262 263 TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { 264 /* NB: bf assumed clean */ 265 TAILQ_REMOVE(frags, bf, bf_list); 266 ath_returnbuf_head(sc, bf); 267 ieee80211_node_decref(ni); 268 } 269 } 270 271 /* 272 * Setup xmit of a fragmented frame. Allocate a buffer 273 * for each frag and bump the node reference count to 274 * reflect the held reference to be setup by ath_tx_start. 275 */ 276 int 277 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 278 struct mbuf *m0, struct ieee80211_node *ni) 279 { 280 struct mbuf *m; 281 struct ath_buf *bf; 282 283 ATH_TXBUF_LOCK(sc); 284 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 285 /* XXX non-management? */ 286 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 287 if (bf == NULL) { /* out of buffers, cleanup */ 288 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", 289 __func__); 290 ath_txfrag_cleanup(sc, frags, ni); 291 break; 292 } 293 ieee80211_node_incref(ni); 294 TAILQ_INSERT_TAIL(frags, bf, bf_list); 295 } 296 ATH_TXBUF_UNLOCK(sc); 297 298 return !TAILQ_EMPTY(frags); 299 } 300 301 static int 302 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 303 { 304 struct mbuf *m; 305 int error; 306 307 /* 308 * Load the DMA map so any coalescing is done. This 309 * also calculates the number of descriptors we need. 310 */ 311 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 312 bf->bf_segs, &bf->bf_nseg, 313 BUS_DMA_NOWAIT); 314 if (error == EFBIG) { 315 /* XXX packet requires too many descriptors */ 316 bf->bf_nseg = ATH_MAX_SCATTER + 1; 317 } else if (error != 0) { 318 sc->sc_stats.ast_tx_busdma++; 319 ieee80211_free_mbuf(m0); 320 return error; 321 } 322 /* 323 * Discard null packets and check for packets that 324 * require too many TX descriptors. We try to convert 325 * the latter to a cluster. 326 */ 327 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ 328 sc->sc_stats.ast_tx_linear++; 329 m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); 330 if (m == NULL) { 331 ieee80211_free_mbuf(m0); 332 sc->sc_stats.ast_tx_nombuf++; 333 return ENOMEM; 334 } 335 m0 = m; 336 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 337 bf->bf_segs, &bf->bf_nseg, 338 BUS_DMA_NOWAIT); 339 if (error != 0) { 340 sc->sc_stats.ast_tx_busdma++; 341 ieee80211_free_mbuf(m0); 342 return error; 343 } 344 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, 345 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 346 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 347 sc->sc_stats.ast_tx_nodata++; 348 ieee80211_free_mbuf(m0); 349 return EIO; 350 } 351 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 352 __func__, m0, m0->m_pkthdr.len); 353 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 354 bf->bf_m = m0; 355 356 return 0; 357 } 358 359 /* 360 * Chain together segments+descriptors for a frame - 11n or otherwise. 361 * 362 * For aggregates, this is called on each frame in the aggregate. 363 */ 364 static void 365 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, 366 struct ath_buf *bf, int is_aggr, int is_first_subframe, 367 int is_last_subframe) 368 { 369 struct ath_hal *ah = sc->sc_ah; 370 char *ds; 371 int i, bp, dsp; 372 HAL_DMA_ADDR bufAddrList[4]; 373 uint32_t segLenList[4]; 374 int numTxMaps = 1; 375 int isFirstDesc = 1; 376 377 /* 378 * XXX There's txdma and txdma_mgmt; the descriptor 379 * sizes must match. 380 */ 381 struct ath_descdma *dd = &sc->sc_txdma; 382 383 /* 384 * Fillin the remainder of the descriptor info. 385 */ 386 387 /* 388 * We need the number of TX data pointers in each descriptor. 389 * EDMA and later chips support 4 TX buffers per descriptor; 390 * previous chips just support one. 391 */ 392 numTxMaps = sc->sc_tx_nmaps; 393 394 /* 395 * For EDMA and later chips ensure the TX map is fully populated 396 * before advancing to the next descriptor. 397 */ 398 ds = (char *) bf->bf_desc; 399 bp = dsp = 0; 400 bzero(bufAddrList, sizeof(bufAddrList)); 401 bzero(segLenList, sizeof(segLenList)); 402 for (i = 0; i < bf->bf_nseg; i++) { 403 bufAddrList[bp] = bf->bf_segs[i].ds_addr; 404 segLenList[bp] = bf->bf_segs[i].ds_len; 405 bp++; 406 407 /* 408 * Go to the next segment if this isn't the last segment 409 * and there's space in the current TX map. 410 */ 411 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 412 continue; 413 414 /* 415 * Last segment or we're out of buffer pointers. 416 */ 417 bp = 0; 418 419 if (i == bf->bf_nseg - 1) 420 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 421 else 422 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 423 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 424 425 /* 426 * XXX This assumes that bfs_txq is the actual destination 427 * hardware queue at this point. It may not have been 428 * assigned, it may actually be pointing to the multicast 429 * software TXQ id. These must be fixed! 430 */ 431 ath_hal_filltxdesc(ah, (struct ath_desc *) ds 432 , bufAddrList 433 , segLenList 434 , bf->bf_descid /* XXX desc id */ 435 , bf->bf_state.bfs_tx_queue 436 , isFirstDesc /* first segment */ 437 , i == bf->bf_nseg - 1 /* last segment */ 438 , (struct ath_desc *) ds0 /* first descriptor */ 439 ); 440 441 /* 442 * Make sure the 11n aggregate fields are cleared. 443 * 444 * XXX TODO: this doesn't need to be called for 445 * aggregate frames; as it'll be called on all 446 * sub-frames. Since the descriptors are in 447 * non-cacheable memory, this leads to some 448 * rather slow writes on MIPS/ARM platforms. 449 */ 450 if (ath_tx_is_11n(sc)) 451 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 452 453 /* 454 * If 11n is enabled, set it up as if it's an aggregate 455 * frame. 456 */ 457 if (is_last_subframe) { 458 ath_hal_set11n_aggr_last(sc->sc_ah, 459 (struct ath_desc *) ds); 460 } else if (is_aggr) { 461 /* 462 * This clears the aggrlen field; so 463 * the caller needs to call set_aggr_first()! 464 * 465 * XXX TODO: don't call this for the first 466 * descriptor in the first frame in an 467 * aggregate! 468 */ 469 ath_hal_set11n_aggr_middle(sc->sc_ah, 470 (struct ath_desc *) ds, 471 bf->bf_state.bfs_ndelim); 472 } 473 isFirstDesc = 0; 474 bf->bf_lastds = (struct ath_desc *) ds; 475 476 /* 477 * Don't forget to skip to the next descriptor. 478 */ 479 ds += sc->sc_tx_desclen; 480 dsp++; 481 482 /* 483 * .. and don't forget to blank these out! 484 */ 485 bzero(bufAddrList, sizeof(bufAddrList)); 486 bzero(segLenList, sizeof(segLenList)); 487 } 488 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 489 } 490 491 /* 492 * Set the rate control fields in the given descriptor based on 493 * the bf_state fields and node state. 494 * 495 * The bfs fields should already be set with the relevant rate 496 * control information, including whether MRR is to be enabled. 497 * 498 * Since the FreeBSD HAL currently sets up the first TX rate 499 * in ath_hal_setuptxdesc(), this will setup the MRR 500 * conditionally for the pre-11n chips, and call ath_buf_set_rate 501 * unconditionally for 11n chips. These require the 11n rate 502 * scenario to be set if MCS rates are enabled, so it's easier 503 * to just always call it. The caller can then only set rates 2, 3 504 * and 4 if multi-rate retry is needed. 505 */ 506 static void 507 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 508 struct ath_buf *bf) 509 { 510 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 511 512 /* If mrr is disabled, blank tries 1, 2, 3 */ 513 if (! bf->bf_state.bfs_ismrr) 514 rc[1].tries = rc[2].tries = rc[3].tries = 0; 515 516 #if 0 517 /* 518 * If NOACK is set, just set ntries=1. 519 */ 520 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 521 rc[1].tries = rc[2].tries = rc[3].tries = 0; 522 rc[0].tries = 1; 523 } 524 #endif 525 526 /* 527 * Always call - that way a retried descriptor will 528 * have the MRR fields overwritten. 529 * 530 * XXX TODO: see if this is really needed - setting up 531 * the first descriptor should set the MRR fields to 0 532 * for us anyway. 533 */ 534 if (ath_tx_is_11n(sc)) { 535 ath_buf_set_rate(sc, ni, bf); 536 } else { 537 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 538 , rc[1].ratecode, rc[1].tries 539 , rc[2].ratecode, rc[2].tries 540 , rc[3].ratecode, rc[3].tries 541 ); 542 } 543 } 544 545 /* 546 * Setup segments+descriptors for an 11n aggregate. 547 * bf_first is the first buffer in the aggregate. 548 * The descriptor list must already been linked together using 549 * bf->bf_next. 550 */ 551 static void 552 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 553 { 554 struct ath_buf *bf, *bf_prev = NULL; 555 struct ath_desc *ds0 = bf_first->bf_desc; 556 557 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 558 __func__, bf_first->bf_state.bfs_nframes, 559 bf_first->bf_state.bfs_al); 560 561 bf = bf_first; 562 563 if (bf->bf_state.bfs_txrate0 == 0) 564 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 565 __func__, bf, 0); 566 if (bf->bf_state.bfs_rc[0].ratecode == 0) 567 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 568 __func__, bf, 0); 569 570 /* 571 * Setup all descriptors of all subframes - this will 572 * call ath_hal_set11naggrmiddle() on every frame. 573 */ 574 while (bf != NULL) { 575 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 576 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 577 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 578 SEQNO(bf->bf_state.bfs_seqno)); 579 580 /* 581 * Setup the initial fields for the first descriptor - all 582 * the non-11n specific stuff. 583 */ 584 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 585 , bf->bf_state.bfs_pktlen /* packet length */ 586 , bf->bf_state.bfs_hdrlen /* header length */ 587 , bf->bf_state.bfs_atype /* Atheros packet type */ 588 , bf->bf_state.bfs_txpower /* txpower */ 589 , bf->bf_state.bfs_txrate0 590 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 591 , bf->bf_state.bfs_keyix /* key cache index */ 592 , bf->bf_state.bfs_txantenna /* antenna mode */ 593 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 594 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 595 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 596 ); 597 598 /* 599 * First descriptor? Setup the rate control and initial 600 * aggregate header information. 601 */ 602 if (bf == bf_first) { 603 /* 604 * setup first desc with rate and aggr info 605 */ 606 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 607 } 608 609 /* 610 * Setup the descriptors for a multi-descriptor frame. 611 * This is both aggregate and non-aggregate aware. 612 */ 613 ath_tx_chaindesclist(sc, ds0, bf, 614 1, /* is_aggr */ 615 !! (bf == bf_first), /* is_first_subframe */ 616 !! (bf->bf_next == NULL) /* is_last_subframe */ 617 ); 618 619 if (bf == bf_first) { 620 /* 621 * Initialise the first 11n aggregate with the 622 * aggregate length and aggregate enable bits. 623 */ 624 ath_hal_set11n_aggr_first(sc->sc_ah, 625 ds0, 626 bf->bf_state.bfs_al, 627 bf->bf_state.bfs_ndelim); 628 } 629 630 /* 631 * Link the last descriptor of the previous frame 632 * to the beginning descriptor of this frame. 633 */ 634 if (bf_prev != NULL) 635 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, 636 bf->bf_daddr); 637 638 /* Save a copy so we can link the next descriptor in */ 639 bf_prev = bf; 640 bf = bf->bf_next; 641 } 642 643 /* 644 * Set the first descriptor bf_lastds field to point to 645 * the last descriptor in the last subframe, that's where 646 * the status update will occur. 647 */ 648 bf_first->bf_lastds = bf_prev->bf_lastds; 649 650 /* 651 * And bf_last in the first descriptor points to the end of 652 * the aggregate list. 653 */ 654 bf_first->bf_last = bf_prev; 655 656 /* 657 * For non-AR9300 NICs, which require the rate control 658 * in the final descriptor - let's set that up now. 659 * 660 * This is because the filltxdesc() HAL call doesn't 661 * populate the last segment with rate control information 662 * if firstSeg is also true. For non-aggregate frames 663 * that is fine, as the first frame already has rate control 664 * info. But if the last frame in an aggregate has one 665 * descriptor, both firstseg and lastseg will be true and 666 * the rate info isn't copied. 667 * 668 * This is inefficient on MIPS/ARM platforms that have 669 * non-cachable memory for TX descriptors, but we'll just 670 * make do for now. 671 * 672 * As to why the rate table is stashed in the last descriptor 673 * rather than the first descriptor? Because proctxdesc() 674 * is called on the final descriptor in an MPDU or A-MPDU - 675 * ie, the one that gets updated by the hardware upon 676 * completion. That way proctxdesc() doesn't need to know 677 * about the first _and_ last TX descriptor. 678 */ 679 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); 680 681 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); 682 } 683 684 /* 685 * Hand-off a frame to the multicast TX queue. 686 * 687 * This is a software TXQ which will be appended to the CAB queue 688 * during the beacon setup code. 689 * 690 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID 691 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated 692 * with the actual hardware txq, or all of this will fall apart. 693 * 694 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state 695 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated 696 * correctly. 697 */ 698 static void 699 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, 700 struct ath_buf *bf) 701 { 702 ATH_TX_LOCK_ASSERT(sc); 703 704 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 705 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 706 707 /* 708 * Ensure that the tx queue is the cabq, so things get 709 * mapped correctly. 710 */ 711 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 712 DPRINTF(sc, ATH_DEBUG_XMIT, 713 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 714 __func__, bf, bf->bf_state.bfs_tx_queue, 715 txq->axq_qnum); 716 } 717 718 ATH_TXQ_LOCK(txq); 719 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 720 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 721 struct ieee80211_frame *wh; 722 723 /* mark previous frame */ 724 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 725 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 726 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 727 BUS_DMASYNC_PREWRITE); 728 729 /* link descriptor */ 730 ath_hal_settxdesclink(sc->sc_ah, 731 bf_last->bf_lastds, 732 bf->bf_daddr); 733 } 734 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 735 ATH_TXQ_UNLOCK(txq); 736 } 737 738 /* 739 * Hand-off packet to a hardware queue. 740 */ 741 static void 742 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 743 struct ath_buf *bf) 744 { 745 struct ath_hal *ah = sc->sc_ah; 746 struct ath_buf *bf_first; 747 748 /* 749 * Insert the frame on the outbound list and pass it on 750 * to the hardware. Multicast frames buffered for power 751 * save stations and transmit from the CAB queue are stored 752 * on a s/w only queue and loaded on to the CAB queue in 753 * the SWBA handler since frames only go out on DTIM and 754 * to avoid possible races. 755 */ 756 ATH_TX_LOCK_ASSERT(sc); 757 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 758 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 759 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 760 ("ath_tx_handoff_hw called for mcast queue")); 761 762 /* 763 * XXX We should instead just verify that sc_txstart_cnt 764 * or ath_txproc_cnt > 0. That would mean that 765 * the reset is going to be waiting for us to complete. 766 */ 767 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { 768 device_printf(sc->sc_dev, 769 "%s: TX dispatch without holding txcount/txstart refcnt!\n", 770 __func__); 771 } 772 773 /* 774 * XXX .. this is going to cause the hardware to get upset; 775 * so we really should find some way to drop or queue 776 * things. 777 */ 778 779 ATH_TXQ_LOCK(txq); 780 781 /* 782 * XXX TODO: if there's a holdingbf, then 783 * ATH_TXQ_PUTRUNNING should be clear. 784 * 785 * If there is a holdingbf and the list is empty, 786 * then axq_link should be pointing to the holdingbf. 787 * 788 * Otherwise it should point to the last descriptor 789 * in the last ath_buf. 790 * 791 * In any case, we should really ensure that we 792 * update the previous descriptor link pointer to 793 * this descriptor, regardless of all of the above state. 794 * 795 * For now this is captured by having axq_link point 796 * to either the holdingbf (if the TXQ list is empty) 797 * or the end of the list (if the TXQ list isn't empty.) 798 * I'd rather just kill axq_link here and do it as above. 799 */ 800 801 /* 802 * Append the frame to the TX queue. 803 */ 804 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 805 ATH_KTR(sc, ATH_KTR_TX, 3, 806 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 807 "depth=%d", 808 txq->axq_qnum, 809 bf, 810 txq->axq_depth); 811 812 /* 813 * If there's a link pointer, update it. 814 * 815 * XXX we should replace this with the above logic, just 816 * to kill axq_link with fire. 817 */ 818 if (txq->axq_link != NULL) { 819 *txq->axq_link = bf->bf_daddr; 820 DPRINTF(sc, ATH_DEBUG_XMIT, 821 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 822 txq->axq_qnum, txq->axq_link, 823 (caddr_t)bf->bf_daddr, bf->bf_desc, 824 txq->axq_depth); 825 ATH_KTR(sc, ATH_KTR_TX, 5, 826 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 827 "lastds=%d", 828 txq->axq_qnum, txq->axq_link, 829 (caddr_t)bf->bf_daddr, bf->bf_desc, 830 bf->bf_lastds); 831 } 832 833 /* 834 * If we've not pushed anything into the hardware yet, 835 * push the head of the queue into the TxDP. 836 * 837 * Once we've started DMA, there's no guarantee that 838 * updating the TxDP with a new value will actually work. 839 * So we just don't do that - if we hit the end of the list, 840 * we keep that buffer around (the "holding buffer") and 841 * re-start DMA by updating the link pointer of _that_ 842 * descriptor and then restart DMA. 843 */ 844 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 845 bf_first = TAILQ_FIRST(&txq->axq_q); 846 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 847 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 848 DPRINTF(sc, ATH_DEBUG_XMIT, 849 "%s: TXDP[%u] = %p (%p) depth %d\n", 850 __func__, txq->axq_qnum, 851 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 852 txq->axq_depth); 853 ATH_KTR(sc, ATH_KTR_TX, 5, 854 "ath_tx_handoff: TXDP[%u] = %p (%p) " 855 "lastds=%p depth %d", 856 txq->axq_qnum, 857 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 858 bf_first->bf_lastds, 859 txq->axq_depth); 860 } 861 862 /* 863 * Ensure that the bf TXQ matches this TXQ, so later 864 * checking and holding buffer manipulation is sane. 865 */ 866 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 867 DPRINTF(sc, ATH_DEBUG_XMIT, 868 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 869 __func__, bf, bf->bf_state.bfs_tx_queue, 870 txq->axq_qnum); 871 } 872 873 /* 874 * Track aggregate queue depth. 875 */ 876 if (bf->bf_state.bfs_aggr) 877 txq->axq_aggr_depth++; 878 879 /* 880 * Update the link pointer. 881 */ 882 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 883 884 /* 885 * Start DMA. 886 * 887 * If we wrote a TxDP above, DMA will start from here. 888 * 889 * If DMA is running, it'll do nothing. 890 * 891 * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 892 * or VEOL) then it stops at the last transmitted write. 893 * We then append a new frame by updating the link pointer 894 * in that descriptor and then kick TxE here; it will re-read 895 * that last descriptor and find the new descriptor to transmit. 896 * 897 * This is why we keep the holding descriptor around. 898 */ 899 ath_hal_txstart(ah, txq->axq_qnum); 900 ATH_TXQ_UNLOCK(txq); 901 ATH_KTR(sc, ATH_KTR_TX, 1, 902 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 903 } 904 905 /* 906 * Restart TX DMA for the given TXQ. 907 * 908 * This must be called whether the queue is empty or not. 909 */ 910 static void 911 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 912 { 913 struct ath_buf *bf, *bf_last; 914 915 ATH_TXQ_LOCK_ASSERT(txq); 916 917 /* XXX make this ATH_TXQ_FIRST */ 918 bf = TAILQ_FIRST(&txq->axq_q); 919 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 920 921 if (bf == NULL) 922 return; 923 924 DPRINTF(sc, ATH_DEBUG_RESET, 925 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 926 __func__, 927 txq->axq_qnum, 928 bf, 929 bf_last, 930 (uint32_t) bf->bf_daddr); 931 932 #ifdef ATH_DEBUG 933 if (sc->sc_debug & ATH_DEBUG_RESET) 934 ath_tx_dump(sc, txq); 935 #endif 936 937 /* 938 * This is called from a restart, so DMA is known to be 939 * completely stopped. 940 */ 941 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 942 ("%s: Q%d: called with PUTRUNNING=1\n", 943 __func__, 944 txq->axq_qnum)); 945 946 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 947 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 948 949 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 950 &txq->axq_link); 951 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 952 } 953 954 /* 955 * Hand off a packet to the hardware (or mcast queue.) 956 * 957 * The relevant hardware txq should be locked. 958 */ 959 static void 960 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 961 struct ath_buf *bf) 962 { 963 ATH_TX_LOCK_ASSERT(sc); 964 965 #ifdef ATH_DEBUG_ALQ 966 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 967 ath_tx_alq_post(sc, bf); 968 #endif 969 970 if (txq->axq_qnum == ATH_TXQ_SWQ) 971 ath_tx_handoff_mcast(sc, txq, bf); 972 else 973 ath_tx_handoff_hw(sc, txq, bf); 974 } 975 976 static int 977 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 978 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 979 int *keyix) 980 { 981 DPRINTF(sc, ATH_DEBUG_XMIT, 982 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 983 __func__, 984 *hdrlen, 985 *pktlen, 986 isfrag, 987 iswep, 988 m0); 989 990 if (iswep) { 991 const struct ieee80211_cipher *cip; 992 struct ieee80211_key *k; 993 994 /* 995 * Construct the 802.11 header+trailer for an encrypted 996 * frame. The only reason this can fail is because of an 997 * unknown or unsupported cipher/key type. 998 */ 999 k = ieee80211_crypto_encap(ni, m0); 1000 if (k == NULL) { 1001 /* 1002 * This can happen when the key is yanked after the 1003 * frame was queued. Just discard the frame; the 1004 * 802.11 layer counts failures and provides 1005 * debugging/diagnostics. 1006 */ 1007 return (0); 1008 } 1009 /* 1010 * Adjust the packet + header lengths for the crypto 1011 * additions and calculate the h/w key index. When 1012 * a s/w mic is done the frame will have had any mic 1013 * added to it prior to entry so m0->m_pkthdr.len will 1014 * account for it. Otherwise we need to add it to the 1015 * packet length. 1016 */ 1017 cip = k->wk_cipher; 1018 (*hdrlen) += cip->ic_header; 1019 (*pktlen) += cip->ic_header + cip->ic_trailer; 1020 /* NB: frags always have any TKIP MIC done in s/w */ 1021 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1022 (*pktlen) += cip->ic_miclen; 1023 (*keyix) = k->wk_keyix; 1024 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1025 /* 1026 * Use station key cache slot, if assigned. 1027 */ 1028 (*keyix) = ni->ni_ucastkey.wk_keyix; 1029 if ((*keyix) == IEEE80211_KEYIX_NONE) 1030 (*keyix) = HAL_TXKEYIX_INVALID; 1031 } else 1032 (*keyix) = HAL_TXKEYIX_INVALID; 1033 1034 return (1); 1035 } 1036 1037 /* 1038 * Calculate whether interoperability protection is required for 1039 * this frame. 1040 * 1041 * This requires the rate control information be filled in, 1042 * as the protection requirement depends upon the current 1043 * operating mode / PHY. 1044 */ 1045 static void 1046 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1047 { 1048 struct ieee80211_frame *wh; 1049 uint8_t rix; 1050 uint16_t flags; 1051 int shortPreamble; 1052 const HAL_RATE_TABLE *rt = sc->sc_currates; 1053 struct ieee80211com *ic = &sc->sc_ic; 1054 1055 flags = bf->bf_state.bfs_txflags; 1056 rix = bf->bf_state.bfs_rc[0].rix; 1057 shortPreamble = bf->bf_state.bfs_shpream; 1058 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1059 1060 /* Disable frame protection for TOA probe frames */ 1061 if (bf->bf_flags & ATH_BUF_TOA_PROBE) { 1062 /* XXX count */ 1063 flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA); 1064 bf->bf_state.bfs_doprot = 0; 1065 goto finish; 1066 } 1067 1068 /* 1069 * If 802.11g protection is enabled, determine whether 1070 * to use RTS/CTS or just CTS. Note that this is only 1071 * done for OFDM unicast frames. 1072 */ 1073 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1074 rt->info[rix].phy == IEEE80211_T_OFDM && 1075 (flags & HAL_TXDESC_NOACK) == 0) { 1076 bf->bf_state.bfs_doprot = 1; 1077 /* XXX fragments must use CCK rates w/ protection */ 1078 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1079 flags |= HAL_TXDESC_RTSENA; 1080 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1081 flags |= HAL_TXDESC_CTSENA; 1082 } 1083 /* 1084 * For frags it would be desirable to use the 1085 * highest CCK rate for RTS/CTS. But stations 1086 * farther away may detect it at a lower CCK rate 1087 * so use the configured protection rate instead 1088 * (for now). 1089 */ 1090 sc->sc_stats.ast_tx_protect++; 1091 } 1092 1093 /* 1094 * If 11n protection is enabled and it's a HT frame, 1095 * enable RTS. 1096 * 1097 * XXX ic_htprotmode or ic_curhtprotmode? 1098 * XXX should it_htprotmode only matter if ic_curhtprotmode 1099 * XXX indicates it's not a HT pure environment? 1100 */ 1101 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1102 rt->info[rix].phy == IEEE80211_T_HT && 1103 (flags & HAL_TXDESC_NOACK) == 0) { 1104 flags |= HAL_TXDESC_RTSENA; 1105 sc->sc_stats.ast_tx_htprotect++; 1106 } 1107 1108 finish: 1109 bf->bf_state.bfs_txflags = flags; 1110 } 1111 1112 /* 1113 * Update the frame duration given the currently selected rate. 1114 * 1115 * This also updates the frame duration value, so it will require 1116 * a DMA flush. 1117 */ 1118 static void 1119 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1120 { 1121 struct ieee80211_frame *wh; 1122 uint8_t rix; 1123 uint16_t flags; 1124 int shortPreamble; 1125 struct ath_hal *ah = sc->sc_ah; 1126 const HAL_RATE_TABLE *rt = sc->sc_currates; 1127 int isfrag = bf->bf_m->m_flags & M_FRAG; 1128 1129 flags = bf->bf_state.bfs_txflags; 1130 rix = bf->bf_state.bfs_rc[0].rix; 1131 shortPreamble = bf->bf_state.bfs_shpream; 1132 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1133 1134 /* 1135 * Calculate duration. This logically belongs in the 802.11 1136 * layer but it lacks sufficient information to calculate it. 1137 */ 1138 if ((flags & HAL_TXDESC_NOACK) == 0 && 1139 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1140 u_int16_t dur; 1141 if (shortPreamble) 1142 dur = rt->info[rix].spAckDuration; 1143 else 1144 dur = rt->info[rix].lpAckDuration; 1145 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1146 dur += dur; /* additional SIFS+ACK */ 1147 /* 1148 * Include the size of next fragment so NAV is 1149 * updated properly. The last fragment uses only 1150 * the ACK duration 1151 * 1152 * XXX TODO: ensure that the rate lookup for each 1153 * fragment is the same as the rate used by the 1154 * first fragment! 1155 */ 1156 dur += ath_hal_computetxtime(ah, 1157 rt, 1158 bf->bf_nextfraglen, 1159 rix, shortPreamble, 1160 AH_TRUE); 1161 } 1162 if (isfrag) { 1163 /* 1164 * Force hardware to use computed duration for next 1165 * fragment by disabling multi-rate retry which updates 1166 * duration based on the multi-rate duration table. 1167 */ 1168 bf->bf_state.bfs_ismrr = 0; 1169 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1170 /* XXX update bfs_rc[0].try? */ 1171 } 1172 1173 /* Update the duration field itself */ 1174 *(u_int16_t *)wh->i_dur = htole16(dur); 1175 } 1176 } 1177 1178 static uint8_t 1179 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1180 int cix, int shortPreamble) 1181 { 1182 uint8_t ctsrate; 1183 1184 /* 1185 * CTS transmit rate is derived from the transmit rate 1186 * by looking in the h/w rate table. We must also factor 1187 * in whether or not a short preamble is to be used. 1188 */ 1189 /* NB: cix is set above where RTS/CTS is enabled */ 1190 KASSERT(cix != 0xff, ("cix not setup")); 1191 ctsrate = rt->info[cix].rateCode; 1192 1193 /* XXX this should only matter for legacy rates */ 1194 if (shortPreamble) 1195 ctsrate |= rt->info[cix].shortPreamble; 1196 1197 return (ctsrate); 1198 } 1199 1200 /* 1201 * Calculate the RTS/CTS duration for legacy frames. 1202 */ 1203 static int 1204 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1205 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1206 int flags) 1207 { 1208 int ctsduration = 0; 1209 1210 /* This mustn't be called for HT modes */ 1211 if (rt->info[cix].phy == IEEE80211_T_HT) { 1212 printf("%s: HT rate where it shouldn't be (0x%x)\n", 1213 __func__, rt->info[cix].rateCode); 1214 return (-1); 1215 } 1216 1217 /* 1218 * Compute the transmit duration based on the frame 1219 * size and the size of an ACK frame. We call into the 1220 * HAL to do the computation since it depends on the 1221 * characteristics of the actual PHY being used. 1222 * 1223 * NB: CTS is assumed the same size as an ACK so we can 1224 * use the precalculated ACK durations. 1225 */ 1226 if (shortPreamble) { 1227 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1228 ctsduration += rt->info[cix].spAckDuration; 1229 ctsduration += ath_hal_computetxtime(ah, 1230 rt, pktlen, rix, AH_TRUE, AH_TRUE); 1231 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1232 ctsduration += rt->info[rix].spAckDuration; 1233 } else { 1234 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1235 ctsduration += rt->info[cix].lpAckDuration; 1236 ctsduration += ath_hal_computetxtime(ah, 1237 rt, pktlen, rix, AH_FALSE, AH_TRUE); 1238 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1239 ctsduration += rt->info[rix].lpAckDuration; 1240 } 1241 1242 return (ctsduration); 1243 } 1244 1245 /* 1246 * Update the given ath_buf with updated rts/cts setup and duration 1247 * values. 1248 * 1249 * To support rate lookups for each software retry, the rts/cts rate 1250 * and cts duration must be re-calculated. 1251 * 1252 * This function assumes the RTS/CTS flags have been set as needed; 1253 * mrr has been disabled; and the rate control lookup has been done. 1254 * 1255 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1256 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1257 */ 1258 static void 1259 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1260 { 1261 uint16_t ctsduration = 0; 1262 uint8_t ctsrate = 0; 1263 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1264 uint8_t cix = 0; 1265 const HAL_RATE_TABLE *rt = sc->sc_currates; 1266 1267 /* 1268 * No RTS/CTS enabled? Don't bother. 1269 */ 1270 if ((bf->bf_state.bfs_txflags & 1271 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1272 /* XXX is this really needed? */ 1273 bf->bf_state.bfs_ctsrate = 0; 1274 bf->bf_state.bfs_ctsduration = 0; 1275 return; 1276 } 1277 1278 /* 1279 * If protection is enabled, use the protection rix control 1280 * rate. Otherwise use the rate0 control rate. 1281 */ 1282 if (bf->bf_state.bfs_doprot) 1283 rix = sc->sc_protrix; 1284 else 1285 rix = bf->bf_state.bfs_rc[0].rix; 1286 1287 /* 1288 * If the raw path has hard-coded ctsrate0 to something, 1289 * use it. 1290 */ 1291 if (bf->bf_state.bfs_ctsrate0 != 0) 1292 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1293 else 1294 /* Control rate from above */ 1295 cix = rt->info[rix].controlRate; 1296 1297 /* Calculate the rtscts rate for the given cix */ 1298 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1299 bf->bf_state.bfs_shpream); 1300 1301 /* The 11n chipsets do ctsduration calculations for you */ 1302 if (! ath_tx_is_11n(sc)) 1303 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1304 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1305 rt, bf->bf_state.bfs_txflags); 1306 1307 /* Squirrel away in ath_buf */ 1308 bf->bf_state.bfs_ctsrate = ctsrate; 1309 bf->bf_state.bfs_ctsduration = ctsduration; 1310 1311 /* 1312 * Must disable multi-rate retry when using RTS/CTS. 1313 */ 1314 if (!sc->sc_mrrprot) { 1315 bf->bf_state.bfs_ismrr = 0; 1316 bf->bf_state.bfs_try0 = 1317 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1318 } 1319 } 1320 1321 /* 1322 * Setup the descriptor chain for a normal or fast-frame 1323 * frame. 1324 * 1325 * XXX TODO: extend to include the destination hardware QCU ID. 1326 * Make sure that is correct. Make sure that when being added 1327 * to the mcastq, the CABQ QCUID is set or things will get a bit 1328 * odd. 1329 */ 1330 static void 1331 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1332 { 1333 struct ath_desc *ds = bf->bf_desc; 1334 struct ath_hal *ah = sc->sc_ah; 1335 1336 if (bf->bf_state.bfs_txrate0 == 0) 1337 DPRINTF(sc, ATH_DEBUG_XMIT, 1338 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1339 1340 ath_hal_setuptxdesc(ah, ds 1341 , bf->bf_state.bfs_pktlen /* packet length */ 1342 , bf->bf_state.bfs_hdrlen /* header length */ 1343 , bf->bf_state.bfs_atype /* Atheros packet type */ 1344 , bf->bf_state.bfs_txpower /* txpower */ 1345 , bf->bf_state.bfs_txrate0 1346 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1347 , bf->bf_state.bfs_keyix /* key cache index */ 1348 , bf->bf_state.bfs_txantenna /* antenna mode */ 1349 , bf->bf_state.bfs_txflags /* flags */ 1350 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1351 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1352 ); 1353 1354 /* 1355 * This will be overriden when the descriptor chain is written. 1356 */ 1357 bf->bf_lastds = ds; 1358 bf->bf_last = bf; 1359 1360 /* Set rate control and descriptor chain for this frame */ 1361 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1362 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1363 } 1364 1365 /* 1366 * Do a rate lookup. 1367 * 1368 * This performs a rate lookup for the given ath_buf only if it's required. 1369 * Non-data frames and raw frames don't require it. 1370 * 1371 * This populates the primary and MRR entries; MRR values are 1372 * then disabled later on if something requires it (eg RTS/CTS on 1373 * pre-11n chipsets. 1374 * 1375 * This needs to be done before the RTS/CTS fields are calculated 1376 * as they may depend upon the rate chosen. 1377 */ 1378 static void 1379 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid, 1380 bool is_aggr) 1381 { 1382 uint8_t rate, rix; 1383 int try0; 1384 int maxdur; // Note: Unused for now 1385 1386 if (! bf->bf_state.bfs_doratelookup) 1387 return; 1388 1389 /* Get rid of any previous state */ 1390 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1391 1392 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1393 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1394 bf->bf_state.bfs_pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur); 1395 1396 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1397 bf->bf_state.bfs_rc[0].rix = rix; 1398 bf->bf_state.bfs_rc[0].ratecode = rate; 1399 bf->bf_state.bfs_rc[0].tries = try0; 1400 1401 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1402 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1403 bf->bf_state.bfs_rc); 1404 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1405 1406 sc->sc_txrix = rix; /* for LED blinking */ 1407 sc->sc_lastdatarix = rix; /* for fast frames */ 1408 bf->bf_state.bfs_try0 = try0; 1409 bf->bf_state.bfs_txrate0 = rate; 1410 } 1411 1412 /* 1413 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1414 */ 1415 static void 1416 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1417 struct ath_buf *bf) 1418 { 1419 struct ath_node *an = ATH_NODE(bf->bf_node); 1420 1421 ATH_TX_LOCK_ASSERT(sc); 1422 1423 if (an->clrdmask == 1) { 1424 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1425 an->clrdmask = 0; 1426 } 1427 } 1428 1429 /* 1430 * Return whether this frame should be software queued or 1431 * direct dispatched. 1432 * 1433 * When doing powersave, BAR frames should be queued but other management 1434 * frames should be directly sent. 1435 * 1436 * When not doing powersave, stick BAR frames into the hardware queue 1437 * so it goes out even though the queue is paused. 1438 * 1439 * For now, management frames are also software queued by default. 1440 */ 1441 static int 1442 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, 1443 struct mbuf *m0, int *queue_to_head) 1444 { 1445 struct ieee80211_node *ni = &an->an_node; 1446 struct ieee80211_frame *wh; 1447 uint8_t type, subtype; 1448 1449 wh = mtod(m0, struct ieee80211_frame *); 1450 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1451 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1452 1453 (*queue_to_head) = 0; 1454 1455 /* If it's not in powersave - direct-dispatch BAR */ 1456 if ((ATH_NODE(ni)->an_is_powersave == 0) 1457 && type == IEEE80211_FC0_TYPE_CTL && 1458 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1459 DPRINTF(sc, ATH_DEBUG_SW_TX, 1460 "%s: BAR: TX'ing direct\n", __func__); 1461 return (0); 1462 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1463 && type == IEEE80211_FC0_TYPE_CTL && 1464 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1465 /* BAR TX whilst asleep; queue */ 1466 DPRINTF(sc, ATH_DEBUG_SW_TX, 1467 "%s: swq: TX'ing\n", __func__); 1468 (*queue_to_head) = 1; 1469 return (1); 1470 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1471 && (type == IEEE80211_FC0_TYPE_MGT || 1472 type == IEEE80211_FC0_TYPE_CTL)) { 1473 /* 1474 * Other control/mgmt frame; bypass software queuing 1475 * for now! 1476 */ 1477 DPRINTF(sc, ATH_DEBUG_XMIT, 1478 "%s: %6D: Node is asleep; sending mgmt " 1479 "(type=%d, subtype=%d)\n", 1480 __func__, ni->ni_macaddr, ":", type, subtype); 1481 return (0); 1482 } else { 1483 return (1); 1484 } 1485 } 1486 1487 1488 /* 1489 * Transmit the given frame to the hardware. 1490 * 1491 * The frame must already be setup; rate control must already have 1492 * been done. 1493 * 1494 * XXX since the TXQ lock is being held here (and I dislike holding 1495 * it for this long when not doing software aggregation), later on 1496 * break this function into "setup_normal" and "xmit_normal". The 1497 * lock only needs to be held for the ath_tx_handoff call. 1498 * 1499 * XXX we don't update the leak count here - if we're doing 1500 * direct frame dispatch, we need to be able to do it without 1501 * decrementing the leak count (eg multicast queue frames.) 1502 */ 1503 static void 1504 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1505 struct ath_buf *bf) 1506 { 1507 struct ath_node *an = ATH_NODE(bf->bf_node); 1508 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1509 1510 ATH_TX_LOCK_ASSERT(sc); 1511 1512 /* 1513 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1514 * set a completion handler however it doesn't (yet) properly 1515 * handle the strict ordering requirements needed for normal, 1516 * non-aggregate session frames. 1517 * 1518 * Once this is implemented, only set CLRDMASK like this for 1519 * frames that must go out - eg management/raw frames. 1520 */ 1521 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1522 1523 /* Setup the descriptor before handoff */ 1524 ath_tx_do_ratelookup(sc, bf, tid->tid, false); 1525 ath_tx_calc_duration(sc, bf); 1526 ath_tx_calc_protection(sc, bf); 1527 ath_tx_set_rtscts(sc, bf); 1528 ath_tx_rate_fill_rcflags(sc, bf); 1529 ath_tx_setds(sc, bf); 1530 1531 /* Track per-TID hardware queue depth correctly */ 1532 tid->hwq_depth++; 1533 1534 /* Assign the completion handler */ 1535 bf->bf_comp = ath_tx_normal_comp; 1536 1537 /* Hand off to hardware */ 1538 ath_tx_handoff(sc, txq, bf); 1539 } 1540 1541 /* 1542 * Do the basic frame setup stuff that's required before the frame 1543 * is added to a software queue. 1544 * 1545 * All frames get mostly the same treatment and it's done once. 1546 * Retransmits fiddle with things like the rate control setup, 1547 * setting the retransmit bit in the packet; doing relevant DMA/bus 1548 * syncing and relinking it (back) into the hardware TX queue. 1549 * 1550 * Note that this may cause the mbuf to be reallocated, so 1551 * m0 may not be valid. 1552 */ 1553 static int 1554 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1555 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1556 { 1557 struct ieee80211vap *vap = ni->ni_vap; 1558 struct ieee80211com *ic = &sc->sc_ic; 1559 int error, iswep, ismcast, isfrag, ismrr; 1560 int keyix, hdrlen, pktlen, try0 = 0; 1561 u_int8_t rix = 0, txrate = 0; 1562 struct ath_desc *ds; 1563 struct ieee80211_frame *wh; 1564 u_int subtype, flags; 1565 HAL_PKT_TYPE atype; 1566 const HAL_RATE_TABLE *rt; 1567 HAL_BOOL shortPreamble; 1568 struct ath_node *an; 1569 1570 /* XXX TODO: this pri is only used for non-QoS check, right? */ 1571 u_int pri; 1572 1573 /* 1574 * To ensure that both sequence numbers and the CCMP PN handling 1575 * is "correct", make sure that the relevant TID queue is locked. 1576 * Otherwise the CCMP PN and seqno may appear out of order, causing 1577 * re-ordered frames to have out of order CCMP PN's, resulting 1578 * in many, many frame drops. 1579 */ 1580 ATH_TX_LOCK_ASSERT(sc); 1581 1582 wh = mtod(m0, struct ieee80211_frame *); 1583 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 1584 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1585 isfrag = m0->m_flags & M_FRAG; 1586 hdrlen = ieee80211_anyhdrsize(wh); 1587 /* 1588 * Packet length must not include any 1589 * pad bytes; deduct them here. 1590 */ 1591 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1592 1593 /* Handle encryption twiddling if needed */ 1594 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1595 &pktlen, &keyix)) { 1596 ieee80211_free_mbuf(m0); 1597 return EIO; 1598 } 1599 1600 /* packet header may have moved, reset our local pointer */ 1601 wh = mtod(m0, struct ieee80211_frame *); 1602 1603 pktlen += IEEE80211_CRC_LEN; 1604 1605 /* 1606 * Load the DMA map so any coalescing is done. This 1607 * also calculates the number of descriptors we need. 1608 */ 1609 error = ath_tx_dmasetup(sc, bf, m0); 1610 if (error != 0) 1611 return error; 1612 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 1613 bf->bf_node = ni; /* NB: held reference */ 1614 m0 = bf->bf_m; /* NB: may have changed */ 1615 wh = mtod(m0, struct ieee80211_frame *); 1616 1617 /* setup descriptors */ 1618 ds = bf->bf_desc; 1619 rt = sc->sc_currates; 1620 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1621 1622 /* 1623 * NB: the 802.11 layer marks whether or not we should 1624 * use short preamble based on the current mode and 1625 * negotiated parameters. 1626 */ 1627 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1628 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1629 shortPreamble = AH_TRUE; 1630 sc->sc_stats.ast_tx_shortpre++; 1631 } else { 1632 shortPreamble = AH_FALSE; 1633 } 1634 1635 an = ATH_NODE(ni); 1636 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1637 flags = 0; 1638 ismrr = 0; /* default no multi-rate retry*/ 1639 1640 pri = ath_tx_getac(sc, m0); /* honor classification */ 1641 /* XXX use txparams instead of fixed values */ 1642 /* 1643 * Calculate Atheros packet type from IEEE80211 packet header, 1644 * setup for rate calculations, and select h/w transmit queue. 1645 */ 1646 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1647 case IEEE80211_FC0_TYPE_MGT: 1648 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1649 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1650 atype = HAL_PKT_TYPE_BEACON; 1651 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1652 atype = HAL_PKT_TYPE_PROBE_RESP; 1653 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1654 atype = HAL_PKT_TYPE_ATIM; 1655 else 1656 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1657 rix = an->an_mgmtrix; 1658 txrate = rt->info[rix].rateCode; 1659 if (shortPreamble) 1660 txrate |= rt->info[rix].shortPreamble; 1661 try0 = ATH_TXMGTTRY; 1662 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1663 break; 1664 case IEEE80211_FC0_TYPE_CTL: 1665 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1666 rix = an->an_mgmtrix; 1667 txrate = rt->info[rix].rateCode; 1668 if (shortPreamble) 1669 txrate |= rt->info[rix].shortPreamble; 1670 try0 = ATH_TXMGTTRY; 1671 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1672 break; 1673 case IEEE80211_FC0_TYPE_DATA: 1674 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1675 /* 1676 * Data frames: multicast frames go out at a fixed rate, 1677 * EAPOL frames use the mgmt frame rate; otherwise consult 1678 * the rate control module for the rate to use. 1679 */ 1680 if (ismcast) { 1681 rix = an->an_mcastrix; 1682 txrate = rt->info[rix].rateCode; 1683 if (shortPreamble) 1684 txrate |= rt->info[rix].shortPreamble; 1685 try0 = 1; 1686 } else if (m0->m_flags & M_EAPOL) { 1687 /* XXX? maybe always use long preamble? */ 1688 rix = an->an_mgmtrix; 1689 txrate = rt->info[rix].rateCode; 1690 if (shortPreamble) 1691 txrate |= rt->info[rix].shortPreamble; 1692 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1693 } else { 1694 /* 1695 * Do rate lookup on each TX, rather than using 1696 * the hard-coded TX information decided here. 1697 */ 1698 ismrr = 1; 1699 bf->bf_state.bfs_doratelookup = 1; 1700 } 1701 1702 /* 1703 * Check whether to set NOACK for this WME category or not. 1704 */ 1705 if (ieee80211_wme_vap_ac_is_noack(vap, pri)) 1706 flags |= HAL_TXDESC_NOACK; 1707 break; 1708 default: 1709 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", 1710 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1711 /* XXX statistic */ 1712 /* XXX free tx dmamap */ 1713 ieee80211_free_mbuf(m0); 1714 return EIO; 1715 } 1716 1717 /* 1718 * There are two known scenarios where the frame AC doesn't match 1719 * what the destination TXQ is. 1720 * 1721 * + non-QoS frames (eg management?) that the net80211 stack has 1722 * assigned a higher AC to, but since it's a non-QoS TID, it's 1723 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1724 * It's quite possible that management frames should just be 1725 * direct dispatched to hardware rather than go via the software 1726 * queue; that should be investigated in the future. There are 1727 * some specific scenarios where this doesn't make sense, mostly 1728 * surrounding ADDBA request/response - hence why that is special 1729 * cased. 1730 * 1731 * + Multicast frames going into the VAP mcast queue. That shows up 1732 * as "TXQ 11". 1733 * 1734 * This driver should eventually support separate TID and TXQ locking, 1735 * allowing for arbitrary AC frames to appear on arbitrary software 1736 * queues, being queued to the "correct" hardware queue when needed. 1737 */ 1738 #if 0 1739 if (txq != sc->sc_ac2q[pri]) { 1740 DPRINTF(sc, ATH_DEBUG_XMIT, 1741 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", 1742 __func__, 1743 txq, 1744 txq->axq_qnum, 1745 pri, 1746 sc->sc_ac2q[pri], 1747 sc->sc_ac2q[pri]->axq_qnum); 1748 } 1749 #endif 1750 1751 /* 1752 * Calculate miscellaneous flags. 1753 */ 1754 if (ismcast) { 1755 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 1756 } else if (pktlen > vap->iv_rtsthreshold && 1757 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 1758 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 1759 sc->sc_stats.ast_tx_rts++; 1760 } 1761 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 1762 sc->sc_stats.ast_tx_noack++; 1763 #ifdef IEEE80211_SUPPORT_TDMA 1764 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 1765 DPRINTF(sc, ATH_DEBUG_TDMA, 1766 "%s: discard frame, ACK required w/ TDMA\n", __func__); 1767 sc->sc_stats.ast_tdma_ack++; 1768 /* XXX free tx dmamap */ 1769 ieee80211_free_mbuf(m0); 1770 return EIO; 1771 } 1772 #endif 1773 1774 /* 1775 * If it's a frame to do location reporting on, 1776 * communicate it to the HAL. 1777 */ 1778 if (ieee80211_get_toa_params(m0, NULL)) { 1779 device_printf(sc->sc_dev, 1780 "%s: setting TX positioning bit\n", __func__); 1781 flags |= HAL_TXDESC_POS; 1782 1783 /* 1784 * Note: The hardware reports timestamps for 1785 * each of the RX'ed packets as part of the packet 1786 * exchange. So this means things like RTS/CTS 1787 * exchanges, as well as the final ACK. 1788 * 1789 * So, if you send a RTS-protected NULL data frame, 1790 * you'll get an RX report for the RTS response, then 1791 * an RX report for the NULL frame, and then the TX 1792 * completion at the end. 1793 * 1794 * NOTE: it doesn't work right for CCK frames; 1795 * there's no channel info data provided unless 1796 * it's OFDM or HT. Will have to dig into it. 1797 */ 1798 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); 1799 bf->bf_flags |= ATH_BUF_TOA_PROBE; 1800 } 1801 1802 #if 0 1803 /* 1804 * Placeholder: if you want to transmit with the azimuth 1805 * timestamp in the end of the payload, here's where you 1806 * should set the TXDESC field. 1807 */ 1808 flags |= HAL_TXDESC_HWTS; 1809 #endif 1810 1811 /* 1812 * Determine if a tx interrupt should be generated for 1813 * this descriptor. We take a tx interrupt to reap 1814 * descriptors when the h/w hits an EOL condition or 1815 * when the descriptor is specifically marked to generate 1816 * an interrupt. We periodically mark descriptors in this 1817 * way to insure timely replenishing of the supply needed 1818 * for sending frames. Defering interrupts reduces system 1819 * load and potentially allows more concurrent work to be 1820 * done but if done to aggressively can cause senders to 1821 * backup. 1822 * 1823 * NB: use >= to deal with sc_txintrperiod changing 1824 * dynamically through sysctl. 1825 */ 1826 if (flags & HAL_TXDESC_INTREQ) { 1827 txq->axq_intrcnt = 0; 1828 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { 1829 flags |= HAL_TXDESC_INTREQ; 1830 txq->axq_intrcnt = 0; 1831 } 1832 1833 /* This point forward is actual TX bits */ 1834 1835 /* 1836 * At this point we are committed to sending the frame 1837 * and we don't need to look at m_nextpkt; clear it in 1838 * case this frame is part of frag chain. 1839 */ 1840 m0->m_nextpkt = NULL; 1841 1842 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1843 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 1844 sc->sc_hwmap[rix].ieeerate, -1); 1845 1846 if (ieee80211_radiotap_active_vap(vap)) { 1847 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1848 if (iswep) 1849 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1850 if (isfrag) 1851 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1852 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1853 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); 1854 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1855 1856 ieee80211_radiotap_tx(vap, m0); 1857 } 1858 1859 /* Blank the legacy rate array */ 1860 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1861 1862 /* 1863 * ath_buf_set_rate needs at least one rate/try to setup 1864 * the rate scenario. 1865 */ 1866 bf->bf_state.bfs_rc[0].rix = rix; 1867 bf->bf_state.bfs_rc[0].tries = try0; 1868 bf->bf_state.bfs_rc[0].ratecode = txrate; 1869 1870 /* Store the decided rate index values away */ 1871 bf->bf_state.bfs_pktlen = pktlen; 1872 bf->bf_state.bfs_hdrlen = hdrlen; 1873 bf->bf_state.bfs_atype = atype; 1874 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); 1875 bf->bf_state.bfs_txrate0 = txrate; 1876 bf->bf_state.bfs_try0 = try0; 1877 bf->bf_state.bfs_keyix = keyix; 1878 bf->bf_state.bfs_txantenna = sc->sc_txantenna; 1879 bf->bf_state.bfs_txflags = flags; 1880 bf->bf_state.bfs_shpream = shortPreamble; 1881 1882 /* XXX this should be done in ath_tx_setrate() */ 1883 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ 1884 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ 1885 bf->bf_state.bfs_ctsduration = 0; 1886 bf->bf_state.bfs_ismrr = ismrr; 1887 1888 return 0; 1889 } 1890 1891 /* 1892 * Queue a frame to the hardware or software queue. 1893 * 1894 * This can be called by the net80211 code. 1895 * 1896 * XXX what about locking? Or, push the seqno assign into the 1897 * XXX aggregate scheduler so its serialised? 1898 * 1899 * XXX When sending management frames via ath_raw_xmit(), 1900 * should CLRDMASK be set unconditionally? 1901 */ 1902 int 1903 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1904 struct ath_buf *bf, struct mbuf *m0) 1905 { 1906 struct ieee80211vap *vap = ni->ni_vap; 1907 struct ath_vap *avp = ATH_VAP(vap); 1908 int r = 0; 1909 u_int pri; 1910 int tid; 1911 struct ath_txq *txq; 1912 int ismcast; 1913 const struct ieee80211_frame *wh; 1914 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1915 ieee80211_seq seqno; 1916 uint8_t type, subtype; 1917 int queue_to_head; 1918 1919 ATH_TX_LOCK_ASSERT(sc); 1920 1921 /* 1922 * Determine the target hardware queue. 1923 * 1924 * For multicast frames, the txq gets overridden appropriately 1925 * depending upon the state of PS. If powersave is enabled 1926 * then they get added to the cabq for later transmit. 1927 * 1928 * The "fun" issue here is that group addressed frames should 1929 * have the sequence number from a different pool, rather than 1930 * the per-TID pool. That means that even QoS group addressed 1931 * frames will have a sequence number from that global value, 1932 * which means if we transmit different group addressed frames 1933 * at different traffic priorities, the sequence numbers will 1934 * all be out of whack. So - chances are, the right thing 1935 * to do here is to always put group addressed frames into the BE 1936 * queue, and ignore the TID for queue selection. 1937 * 1938 * For any other frame, we do a TID/QoS lookup inside the frame 1939 * to see what the TID should be. If it's a non-QoS frame, the 1940 * AC and TID are overridden. The TID/TXQ code assumes the 1941 * TID is on a predictable hardware TXQ, so we don't support 1942 * having a node TID queued to multiple hardware TXQs. 1943 * This may change in the future but would require some locking 1944 * fudgery. 1945 */ 1946 pri = ath_tx_getac(sc, m0); 1947 tid = ath_tx_gettid(sc, m0); 1948 1949 txq = sc->sc_ac2q[pri]; 1950 wh = mtod(m0, struct ieee80211_frame *); 1951 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1952 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1953 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1954 1955 /* 1956 * Enforce how deep the multicast queue can grow. 1957 * 1958 * XXX duplicated in ath_raw_xmit(). 1959 */ 1960 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1961 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1962 > sc->sc_txq_mcastq_maxdepth) { 1963 sc->sc_stats.ast_tx_mcastq_overflow++; 1964 m_freem(m0); 1965 return (ENOBUFS); 1966 } 1967 } 1968 1969 /* 1970 * Enforce how deep the unicast queue can grow. 1971 * 1972 * If the node is in power save then we don't want 1973 * the software queue to grow too deep, or a node may 1974 * end up consuming all of the ath_buf entries. 1975 * 1976 * For now, only do this for DATA frames. 1977 * 1978 * We will want to cap how many management/control 1979 * frames get punted to the software queue so it doesn't 1980 * fill up. But the correct solution isn't yet obvious. 1981 * In any case, this check should at least let frames pass 1982 * that we are direct-dispatching. 1983 * 1984 * XXX TODO: duplicate this to the raw xmit path! 1985 */ 1986 if (type == IEEE80211_FC0_TYPE_DATA && 1987 ATH_NODE(ni)->an_is_powersave && 1988 ATH_NODE(ni)->an_swq_depth > 1989 sc->sc_txq_node_psq_maxdepth) { 1990 sc->sc_stats.ast_tx_node_psq_overflow++; 1991 m_freem(m0); 1992 return (ENOBUFS); 1993 } 1994 1995 /* A-MPDU TX */ 1996 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); 1997 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); 1998 is_ampdu = is_ampdu_tx | is_ampdu_pending; 1999 2000 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", 2001 __func__, tid, pri, is_ampdu); 2002 2003 /* Set local packet state, used to queue packets to hardware */ 2004 bf->bf_state.bfs_tid = tid; 2005 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 2006 bf->bf_state.bfs_pri = pri; 2007 2008 #if 1 2009 /* 2010 * When servicing one or more stations in power-save mode 2011 * (or) if there is some mcast data waiting on the mcast 2012 * queue (to prevent out of order delivery) multicast frames 2013 * must be bufferd until after the beacon. 2014 * 2015 * TODO: we should lock the mcastq before we check the length. 2016 */ 2017 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { 2018 txq = &avp->av_mcastq; 2019 /* 2020 * Mark the frame as eventually belonging on the CAB 2021 * queue, so the descriptor setup functions will 2022 * correctly initialise the descriptor 'qcuId' field. 2023 */ 2024 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; 2025 } 2026 #endif 2027 2028 /* Do the generic frame setup */ 2029 /* XXX should just bzero the bf_state? */ 2030 bf->bf_state.bfs_dobaw = 0; 2031 2032 /* A-MPDU TX? Manually set sequence number */ 2033 /* 2034 * Don't do it whilst pending; the net80211 layer still 2035 * assigns them. 2036 * 2037 * Don't assign A-MPDU sequence numbers to group address 2038 * frames; they come from a different sequence number space. 2039 */ 2040 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) { 2041 /* 2042 * Always call; this function will 2043 * handle making sure that null data frames 2044 * and group-addressed frames don't get a sequence number 2045 * from the current TID and thus mess with the BAW. 2046 */ 2047 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 2048 2049 /* 2050 * Don't add QoS NULL frames and group-addressed frames 2051 * to the BAW. 2052 */ 2053 if (IEEE80211_QOS_HAS_SEQ(wh) && 2054 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) && 2055 (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) { 2056 bf->bf_state.bfs_dobaw = 1; 2057 } 2058 } 2059 2060 /* 2061 * If needed, the sequence number has been assigned. 2062 * Squirrel it away somewhere easy to get to. 2063 */ 2064 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 2065 2066 /* Is ampdu pending? fetch the seqno and print it out */ 2067 if (is_ampdu_pending) 2068 DPRINTF(sc, ATH_DEBUG_SW_TX, 2069 "%s: tid %d: ampdu pending, seqno %d\n", 2070 __func__, tid, M_SEQNO_GET(m0)); 2071 2072 /* This also sets up the DMA map; crypto; frame parameters, etc */ 2073 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 2074 2075 if (r != 0) 2076 goto done; 2077 2078 /* At this point m0 could have changed! */ 2079 m0 = bf->bf_m; 2080 2081 #if 1 2082 /* 2083 * If it's a multicast frame, do a direct-dispatch to the 2084 * destination hardware queue. Don't bother software 2085 * queuing it. 2086 */ 2087 /* 2088 * If it's a BAR frame, do a direct dispatch to the 2089 * destination hardware queue. Don't bother software 2090 * queuing it, as the TID will now be paused. 2091 * Sending a BAR frame can occur from the net80211 txa timer 2092 * (ie, retries) or from the ath txtask (completion call.) 2093 * It queues directly to hardware because the TID is paused 2094 * at this point (and won't be unpaused until the BAR has 2095 * either been TXed successfully or max retries has been 2096 * reached.) 2097 */ 2098 /* 2099 * Until things are better debugged - if this node is asleep 2100 * and we're sending it a non-BAR frame, direct dispatch it. 2101 * Why? Because we need to figure out what's actually being 2102 * sent - eg, during reassociation/reauthentication after 2103 * the node (last) disappeared whilst asleep, the driver should 2104 * have unpaused/unsleep'ed the node. So until that is 2105 * sorted out, use this workaround. 2106 */ 2107 if (txq == &avp->av_mcastq) { 2108 DPRINTF(sc, ATH_DEBUG_SW_TX, 2109 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2110 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2111 ath_tx_xmit_normal(sc, txq, bf); 2112 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2113 &queue_to_head)) { 2114 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2115 } else { 2116 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2117 ath_tx_xmit_normal(sc, txq, bf); 2118 } 2119 #else 2120 /* 2121 * For now, since there's no software queue, 2122 * direct-dispatch to the hardware. 2123 */ 2124 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2125 /* 2126 * Update the current leak count if 2127 * we're leaking frames; and set the 2128 * MORE flag as appropriate. 2129 */ 2130 ath_tx_leak_count_update(sc, tid, bf); 2131 ath_tx_xmit_normal(sc, txq, bf); 2132 #endif 2133 done: 2134 return 0; 2135 } 2136 2137 static int 2138 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2139 struct ath_buf *bf, struct mbuf *m0, 2140 const struct ieee80211_bpf_params *params) 2141 { 2142 struct ieee80211com *ic = &sc->sc_ic; 2143 struct ieee80211vap *vap = ni->ni_vap; 2144 int error, ismcast, ismrr; 2145 int keyix, hdrlen, pktlen, try0, txantenna; 2146 u_int8_t rix, txrate; 2147 struct ieee80211_frame *wh; 2148 u_int flags; 2149 HAL_PKT_TYPE atype; 2150 const HAL_RATE_TABLE *rt; 2151 struct ath_desc *ds; 2152 u_int pri; 2153 int o_tid = -1; 2154 int do_override; 2155 uint8_t type, subtype; 2156 int queue_to_head; 2157 struct ath_node *an = ATH_NODE(ni); 2158 2159 ATH_TX_LOCK_ASSERT(sc); 2160 2161 wh = mtod(m0, struct ieee80211_frame *); 2162 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2163 hdrlen = ieee80211_anyhdrsize(wh); 2164 /* 2165 * Packet length must not include any 2166 * pad bytes; deduct them here. 2167 */ 2168 /* XXX honor IEEE80211_BPF_DATAPAD */ 2169 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2170 2171 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2172 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2173 2174 ATH_KTR(sc, ATH_KTR_TX, 2, 2175 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2176 2177 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2178 __func__, ismcast); 2179 2180 pri = params->ibp_pri & 3; 2181 /* Override pri if the frame isn't a QoS one */ 2182 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2183 pri = ath_tx_getac(sc, m0); 2184 2185 /* XXX If it's an ADDBA, override the correct queue */ 2186 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2187 2188 /* Map ADDBA to the correct priority */ 2189 if (do_override) { 2190 #if 1 2191 DPRINTF(sc, ATH_DEBUG_XMIT, 2192 "%s: overriding tid %d pri %d -> %d\n", 2193 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2194 #endif 2195 pri = TID_TO_WME_AC(o_tid); 2196 } 2197 2198 /* 2199 * "pri" is the hardware queue to transmit on. 2200 * 2201 * Look at the description in ath_tx_start() to understand 2202 * what needs to be "fixed" here so we just use the TID 2203 * for QoS frames. 2204 */ 2205 2206 /* Handle encryption twiddling if needed */ 2207 if (! ath_tx_tag_crypto(sc, ni, 2208 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2209 &hdrlen, &pktlen, &keyix)) { 2210 ieee80211_free_mbuf(m0); 2211 return EIO; 2212 } 2213 /* packet header may have moved, reset our local pointer */ 2214 wh = mtod(m0, struct ieee80211_frame *); 2215 2216 /* Do the generic frame setup */ 2217 /* XXX should just bzero the bf_state? */ 2218 bf->bf_state.bfs_dobaw = 0; 2219 2220 error = ath_tx_dmasetup(sc, bf, m0); 2221 if (error != 0) 2222 return error; 2223 m0 = bf->bf_m; /* NB: may have changed */ 2224 wh = mtod(m0, struct ieee80211_frame *); 2225 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2226 bf->bf_node = ni; /* NB: held reference */ 2227 2228 /* Always enable CLRDMASK for raw frames for now.. */ 2229 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2230 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2231 if (params->ibp_flags & IEEE80211_BPF_RTS) 2232 flags |= HAL_TXDESC_RTSENA; 2233 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2234 /* XXX assume 11g/11n protection? */ 2235 bf->bf_state.bfs_doprot = 1; 2236 flags |= HAL_TXDESC_CTSENA; 2237 } 2238 /* XXX leave ismcast to injector? */ 2239 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2240 flags |= HAL_TXDESC_NOACK; 2241 2242 rt = sc->sc_currates; 2243 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2244 2245 /* Fetch first rate information */ 2246 rix = ath_tx_findrix(sc, params->ibp_rate0); 2247 try0 = params->ibp_try0; 2248 2249 /* 2250 * Override EAPOL rate as appropriate. 2251 */ 2252 if (m0->m_flags & M_EAPOL) { 2253 /* XXX? maybe always use long preamble? */ 2254 rix = an->an_mgmtrix; 2255 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2256 } 2257 2258 /* 2259 * If it's a frame to do location reporting on, 2260 * communicate it to the HAL. 2261 */ 2262 if (ieee80211_get_toa_params(m0, NULL)) { 2263 device_printf(sc->sc_dev, 2264 "%s: setting TX positioning bit\n", __func__); 2265 flags |= HAL_TXDESC_POS; 2266 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); 2267 bf->bf_flags |= ATH_BUF_TOA_PROBE; 2268 } 2269 2270 txrate = rt->info[rix].rateCode; 2271 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2272 txrate |= rt->info[rix].shortPreamble; 2273 sc->sc_txrix = rix; 2274 ismrr = (params->ibp_try1 != 0); 2275 txantenna = params->ibp_pri >> 2; 2276 if (txantenna == 0) /* XXX? */ 2277 txantenna = sc->sc_txantenna; 2278 2279 /* 2280 * Since ctsrate is fixed, store it away for later 2281 * use when the descriptor fields are being set. 2282 */ 2283 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2284 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2285 2286 /* 2287 * NB: we mark all packets as type PSPOLL so the h/w won't 2288 * set the sequence number, duration, etc. 2289 */ 2290 atype = HAL_PKT_TYPE_PSPOLL; 2291 2292 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2293 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2294 sc->sc_hwmap[rix].ieeerate, -1); 2295 2296 if (ieee80211_radiotap_active_vap(vap)) { 2297 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2298 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2299 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2300 if (m0->m_flags & M_FRAG) 2301 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2302 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2303 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2304 ieee80211_get_node_txpower(ni)); 2305 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2306 2307 ieee80211_radiotap_tx(vap, m0); 2308 } 2309 2310 /* 2311 * Formulate first tx descriptor with tx controls. 2312 */ 2313 ds = bf->bf_desc; 2314 /* XXX check return value? */ 2315 2316 /* Store the decided rate index values away */ 2317 bf->bf_state.bfs_pktlen = pktlen; 2318 bf->bf_state.bfs_hdrlen = hdrlen; 2319 bf->bf_state.bfs_atype = atype; 2320 bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2321 ieee80211_get_node_txpower(ni)); 2322 bf->bf_state.bfs_txrate0 = txrate; 2323 bf->bf_state.bfs_try0 = try0; 2324 bf->bf_state.bfs_keyix = keyix; 2325 bf->bf_state.bfs_txantenna = txantenna; 2326 bf->bf_state.bfs_txflags = flags; 2327 bf->bf_state.bfs_shpream = 2328 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2329 2330 /* Set local packet state, used to queue packets to hardware */ 2331 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2332 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2333 bf->bf_state.bfs_pri = pri; 2334 2335 /* XXX this should be done in ath_tx_setrate() */ 2336 bf->bf_state.bfs_ctsrate = 0; 2337 bf->bf_state.bfs_ctsduration = 0; 2338 bf->bf_state.bfs_ismrr = ismrr; 2339 2340 /* Blank the legacy rate array */ 2341 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2342 2343 bf->bf_state.bfs_rc[0].rix = rix; 2344 bf->bf_state.bfs_rc[0].tries = try0; 2345 bf->bf_state.bfs_rc[0].ratecode = txrate; 2346 2347 if (ismrr) { 2348 int rix; 2349 2350 rix = ath_tx_findrix(sc, params->ibp_rate1); 2351 bf->bf_state.bfs_rc[1].rix = rix; 2352 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2353 2354 rix = ath_tx_findrix(sc, params->ibp_rate2); 2355 bf->bf_state.bfs_rc[2].rix = rix; 2356 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2357 2358 rix = ath_tx_findrix(sc, params->ibp_rate3); 2359 bf->bf_state.bfs_rc[3].rix = rix; 2360 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2361 } 2362 /* 2363 * All the required rate control decisions have been made; 2364 * fill in the rc flags. 2365 */ 2366 ath_tx_rate_fill_rcflags(sc, bf); 2367 2368 /* NB: no buffered multicast in power save support */ 2369 2370 /* 2371 * If we're overiding the ADDBA destination, dump directly 2372 * into the hardware queue, right after any pending 2373 * frames to that node are. 2374 */ 2375 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2376 __func__, do_override); 2377 2378 #if 1 2379 /* 2380 * Put addba frames in the right place in the right TID/HWQ. 2381 */ 2382 if (do_override) { 2383 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2384 /* 2385 * XXX if it's addba frames, should we be leaking 2386 * them out via the frame leak method? 2387 * XXX for now let's not risk it; but we may wish 2388 * to investigate this later. 2389 */ 2390 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2391 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2392 &queue_to_head)) { 2393 /* Queue to software queue */ 2394 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2395 } else { 2396 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2397 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2398 } 2399 #else 2400 /* Direct-dispatch to the hardware */ 2401 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2402 /* 2403 * Update the current leak count if 2404 * we're leaking frames; and set the 2405 * MORE flag as appropriate. 2406 */ 2407 ath_tx_leak_count_update(sc, tid, bf); 2408 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2409 #endif 2410 return 0; 2411 } 2412 2413 /* 2414 * Send a raw frame. 2415 * 2416 * This can be called by net80211. 2417 */ 2418 int 2419 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2420 const struct ieee80211_bpf_params *params) 2421 { 2422 struct ieee80211com *ic = ni->ni_ic; 2423 struct ath_softc *sc = ic->ic_softc; 2424 struct ath_buf *bf; 2425 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2426 int error = 0; 2427 2428 ATH_PCU_LOCK(sc); 2429 if (sc->sc_inreset_cnt > 0) { 2430 DPRINTF(sc, ATH_DEBUG_XMIT, 2431 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2432 error = EIO; 2433 ATH_PCU_UNLOCK(sc); 2434 goto badbad; 2435 } 2436 sc->sc_txstart_cnt++; 2437 ATH_PCU_UNLOCK(sc); 2438 2439 /* Wake the hardware up already */ 2440 ATH_LOCK(sc); 2441 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2442 ATH_UNLOCK(sc); 2443 2444 ATH_TX_LOCK(sc); 2445 2446 if (!sc->sc_running || sc->sc_invalid) { 2447 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", 2448 __func__, sc->sc_running, sc->sc_invalid); 2449 m_freem(m); 2450 error = ENETDOWN; 2451 goto bad; 2452 } 2453 2454 /* 2455 * Enforce how deep the multicast queue can grow. 2456 * 2457 * XXX duplicated in ath_tx_start(). 2458 */ 2459 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2460 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2461 > sc->sc_txq_mcastq_maxdepth) { 2462 sc->sc_stats.ast_tx_mcastq_overflow++; 2463 error = ENOBUFS; 2464 } 2465 2466 if (error != 0) { 2467 m_freem(m); 2468 goto bad; 2469 } 2470 } 2471 2472 /* 2473 * Grab a TX buffer and associated resources. 2474 */ 2475 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2476 if (bf == NULL) { 2477 sc->sc_stats.ast_tx_nobuf++; 2478 m_freem(m); 2479 error = ENOBUFS; 2480 goto bad; 2481 } 2482 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2483 m, params, bf); 2484 2485 if (params == NULL) { 2486 /* 2487 * Legacy path; interpret frame contents to decide 2488 * precisely how to send the frame. 2489 */ 2490 if (ath_tx_start(sc, ni, bf, m)) { 2491 error = EIO; /* XXX */ 2492 goto bad2; 2493 } 2494 } else { 2495 /* 2496 * Caller supplied explicit parameters to use in 2497 * sending the frame. 2498 */ 2499 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2500 error = EIO; /* XXX */ 2501 goto bad2; 2502 } 2503 } 2504 sc->sc_wd_timer = 5; 2505 sc->sc_stats.ast_tx_raw++; 2506 2507 /* 2508 * Update the TIM - if there's anything queued to the 2509 * software queue and power save is enabled, we should 2510 * set the TIM. 2511 */ 2512 ath_tx_update_tim(sc, ni, 1); 2513 2514 ATH_TX_UNLOCK(sc); 2515 2516 ATH_PCU_LOCK(sc); 2517 sc->sc_txstart_cnt--; 2518 ATH_PCU_UNLOCK(sc); 2519 2520 2521 /* Put the hardware back to sleep if required */ 2522 ATH_LOCK(sc); 2523 ath_power_restore_power_state(sc); 2524 ATH_UNLOCK(sc); 2525 2526 return 0; 2527 2528 bad2: 2529 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2530 "bf=%p", 2531 m, 2532 params, 2533 bf); 2534 ATH_TXBUF_LOCK(sc); 2535 ath_returnbuf_head(sc, bf); 2536 ATH_TXBUF_UNLOCK(sc); 2537 2538 bad: 2539 ATH_TX_UNLOCK(sc); 2540 2541 ATH_PCU_LOCK(sc); 2542 sc->sc_txstart_cnt--; 2543 ATH_PCU_UNLOCK(sc); 2544 2545 /* Put the hardware back to sleep if required */ 2546 ATH_LOCK(sc); 2547 ath_power_restore_power_state(sc); 2548 ATH_UNLOCK(sc); 2549 2550 badbad: 2551 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2552 m, params); 2553 sc->sc_stats.ast_tx_raw_fail++; 2554 2555 return error; 2556 } 2557 2558 /* Some helper functions */ 2559 2560 /* 2561 * ADDBA (and potentially others) need to be placed in the same 2562 * hardware queue as the TID/node it's relating to. This is so 2563 * it goes out after any pending non-aggregate frames to the 2564 * same node/TID. 2565 * 2566 * If this isn't done, the ADDBA can go out before the frames 2567 * queued in hardware. Even though these frames have a sequence 2568 * number -earlier- than the ADDBA can be transmitted (but 2569 * no frames whose sequence numbers are after the ADDBA should 2570 * be!) they'll arrive after the ADDBA - and the receiving end 2571 * will simply drop them as being out of the BAW. 2572 * 2573 * The frames can't be appended to the TID software queue - it'll 2574 * never be sent out. So these frames have to be directly 2575 * dispatched to the hardware, rather than queued in software. 2576 * So if this function returns true, the TXQ has to be 2577 * overridden and it has to be directly dispatched. 2578 * 2579 * It's a dirty hack, but someone's gotta do it. 2580 */ 2581 2582 /* 2583 * XXX doesn't belong here! 2584 */ 2585 static int 2586 ieee80211_is_action(struct ieee80211_frame *wh) 2587 { 2588 /* Type: Management frame? */ 2589 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2590 IEEE80211_FC0_TYPE_MGT) 2591 return 0; 2592 2593 /* Subtype: Action frame? */ 2594 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2595 IEEE80211_FC0_SUBTYPE_ACTION) 2596 return 0; 2597 2598 return 1; 2599 } 2600 2601 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2602 /* 2603 * Return an alternate TID for ADDBA request frames. 2604 * 2605 * Yes, this likely should be done in the net80211 layer. 2606 */ 2607 static int 2608 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2609 struct ieee80211_node *ni, 2610 struct mbuf *m0, int *tid) 2611 { 2612 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2613 struct ieee80211_action_ba_addbarequest *ia; 2614 uint8_t *frm; 2615 uint16_t baparamset; 2616 2617 /* Not action frame? Bail */ 2618 if (! ieee80211_is_action(wh)) 2619 return 0; 2620 2621 /* XXX Not needed for frames we send? */ 2622 #if 0 2623 /* Correct length? */ 2624 if (! ieee80211_parse_action(ni, m)) 2625 return 0; 2626 #endif 2627 2628 /* Extract out action frame */ 2629 frm = (u_int8_t *)&wh[1]; 2630 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2631 2632 /* Not ADDBA? Bail */ 2633 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2634 return 0; 2635 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2636 return 0; 2637 2638 /* Extract TID, return it */ 2639 baparamset = le16toh(ia->rq_baparamset); 2640 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2641 2642 return 1; 2643 } 2644 #undef MS 2645 2646 /* Per-node software queue operations */ 2647 2648 /* 2649 * Add the current packet to the given BAW. 2650 * It is assumed that the current packet 2651 * 2652 * + fits inside the BAW; 2653 * + already has had a sequence number allocated. 2654 * 2655 * Since the BAW status may be modified by both the ath task and 2656 * the net80211/ifnet contexts, the TID must be locked. 2657 */ 2658 void 2659 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2660 struct ath_tid *tid, struct ath_buf *bf) 2661 { 2662 int index, cindex; 2663 struct ieee80211_tx_ampdu *tap; 2664 2665 ATH_TX_LOCK_ASSERT(sc); 2666 2667 if (bf->bf_state.bfs_isretried) 2668 return; 2669 2670 tap = ath_tx_get_tx_tid(an, tid->tid); 2671 2672 if (! bf->bf_state.bfs_dobaw) { 2673 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2674 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2675 __func__, SEQNO(bf->bf_state.bfs_seqno), 2676 tap->txa_start, tap->txa_wnd); 2677 } 2678 2679 if (bf->bf_state.bfs_addedbaw) 2680 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2681 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2682 "baw head=%d tail=%d\n", 2683 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2684 tap->txa_start, tap->txa_wnd, tid->baw_head, 2685 tid->baw_tail); 2686 2687 /* 2688 * Verify that the given sequence number is not outside of the 2689 * BAW. Complain loudly if that's the case. 2690 */ 2691 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2692 SEQNO(bf->bf_state.bfs_seqno))) { 2693 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2694 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2695 "baw head=%d tail=%d\n", 2696 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2697 tap->txa_start, tap->txa_wnd, tid->baw_head, 2698 tid->baw_tail); 2699 } 2700 2701 /* 2702 * ni->ni_txseqs[] is the currently allocated seqno. 2703 * the txa state contains the current baw start. 2704 */ 2705 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2706 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2707 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2708 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2709 "baw head=%d tail=%d\n", 2710 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2711 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2712 tid->baw_tail); 2713 2714 2715 #if 0 2716 assert(tid->tx_buf[cindex] == NULL); 2717 #endif 2718 if (tid->tx_buf[cindex] != NULL) { 2719 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2720 "%s: ba packet dup (index=%d, cindex=%d, " 2721 "head=%d, tail=%d)\n", 2722 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2723 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2724 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2725 __func__, 2726 tid->tx_buf[cindex], 2727 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2728 bf, 2729 SEQNO(bf->bf_state.bfs_seqno) 2730 ); 2731 } 2732 tid->tx_buf[cindex] = bf; 2733 2734 if (index >= ((tid->baw_tail - tid->baw_head) & 2735 (ATH_TID_MAX_BUFS - 1))) { 2736 tid->baw_tail = cindex; 2737 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2738 } 2739 } 2740 2741 /* 2742 * Flip the BAW buffer entry over from the existing one to the new one. 2743 * 2744 * When software retransmitting a (sub-)frame, it is entirely possible that 2745 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2746 * In that instance the buffer is cloned and the new buffer is used for 2747 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2748 * tracking array to maintain consistency. 2749 */ 2750 static void 2751 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2752 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2753 { 2754 int index, cindex; 2755 struct ieee80211_tx_ampdu *tap; 2756 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2757 2758 ATH_TX_LOCK_ASSERT(sc); 2759 2760 tap = ath_tx_get_tx_tid(an, tid->tid); 2761 index = ATH_BA_INDEX(tap->txa_start, seqno); 2762 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2763 2764 /* 2765 * Just warn for now; if it happens then we should find out 2766 * about it. It's highly likely the aggregation session will 2767 * soon hang. 2768 */ 2769 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2770 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2771 "%s: retransmitted buffer" 2772 " has mismatching seqno's, BA session may hang.\n", 2773 __func__); 2774 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2775 "%s: old seqno=%d, new_seqno=%d\n", __func__, 2776 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); 2777 } 2778 2779 if (tid->tx_buf[cindex] != old_bf) { 2780 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2781 "%s: ath_buf pointer incorrect; " 2782 " has m BA session may hang.\n", __func__); 2783 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2784 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); 2785 } 2786 2787 tid->tx_buf[cindex] = new_bf; 2788 } 2789 2790 /* 2791 * seq_start - left edge of BAW 2792 * seq_next - current/next sequence number to allocate 2793 * 2794 * Since the BAW status may be modified by both the ath task and 2795 * the net80211/ifnet contexts, the TID must be locked. 2796 */ 2797 static void 2798 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2799 struct ath_tid *tid, const struct ath_buf *bf) 2800 { 2801 int index, cindex; 2802 struct ieee80211_tx_ampdu *tap; 2803 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2804 2805 ATH_TX_LOCK_ASSERT(sc); 2806 2807 tap = ath_tx_get_tx_tid(an, tid->tid); 2808 index = ATH_BA_INDEX(tap->txa_start, seqno); 2809 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2810 2811 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2812 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2813 "baw head=%d, tail=%d\n", 2814 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2815 cindex, tid->baw_head, tid->baw_tail); 2816 2817 /* 2818 * If this occurs then we have a big problem - something else 2819 * has slid tap->txa_start along without updating the BAW 2820 * tracking start/end pointers. Thus the TX BAW state is now 2821 * completely busted. 2822 * 2823 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2824 * it's quite possible that a cloned buffer is making its way 2825 * here and causing it to fire off. Disable TDMA for now. 2826 */ 2827 if (tid->tx_buf[cindex] != bf) { 2828 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2829 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2830 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 2831 tid->tx_buf[cindex], 2832 (tid->tx_buf[cindex] != NULL) ? 2833 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); 2834 } 2835 2836 tid->tx_buf[cindex] = NULL; 2837 2838 while (tid->baw_head != tid->baw_tail && 2839 !tid->tx_buf[tid->baw_head]) { 2840 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2841 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2842 } 2843 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2844 "%s: tid=%d: baw is now %d:%d, baw head=%d\n", 2845 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); 2846 } 2847 2848 static void 2849 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, 2850 struct ath_buf *bf) 2851 { 2852 struct ieee80211_frame *wh; 2853 2854 ATH_TX_LOCK_ASSERT(sc); 2855 2856 if (tid->an->an_leak_count > 0) { 2857 wh = mtod(bf->bf_m, struct ieee80211_frame *); 2858 2859 /* 2860 * Update MORE based on the software/net80211 queue states. 2861 */ 2862 if ((tid->an->an_stack_psq > 0) 2863 || (tid->an->an_swq_depth > 0)) 2864 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 2865 else 2866 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; 2867 2868 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 2869 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", 2870 __func__, 2871 tid->an->an_node.ni_macaddr, 2872 ":", 2873 tid->an->an_leak_count, 2874 tid->an->an_stack_psq, 2875 tid->an->an_swq_depth, 2876 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); 2877 2878 /* 2879 * Re-sync the underlying buffer. 2880 */ 2881 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2882 BUS_DMASYNC_PREWRITE); 2883 2884 tid->an->an_leak_count --; 2885 } 2886 } 2887 2888 static int 2889 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) 2890 { 2891 2892 ATH_TX_LOCK_ASSERT(sc); 2893 2894 if (tid->an->an_leak_count > 0) { 2895 return (1); 2896 } 2897 if (tid->paused) 2898 return (0); 2899 return (1); 2900 } 2901 2902 /* 2903 * Mark the current node/TID as ready to TX. 2904 * 2905 * This is done to make it easy for the software scheduler to 2906 * find which nodes have data to send. 2907 * 2908 * The TXQ lock must be held. 2909 */ 2910 void 2911 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2912 { 2913 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2914 2915 ATH_TX_LOCK_ASSERT(sc); 2916 2917 /* 2918 * If we are leaking out a frame to this destination 2919 * for PS-POLL, ensure that we allow scheduling to 2920 * occur. 2921 */ 2922 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 2923 return; /* paused, can't schedule yet */ 2924 2925 if (tid->sched) 2926 return; /* already scheduled */ 2927 2928 tid->sched = 1; 2929 2930 #if 0 2931 /* 2932 * If this is a sleeping node we're leaking to, given 2933 * it a higher priority. This is so bad for QoS it hurts. 2934 */ 2935 if (tid->an->an_leak_count) { 2936 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); 2937 } else { 2938 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2939 } 2940 #endif 2941 2942 /* 2943 * We can't do the above - it'll confuse the TXQ software 2944 * scheduler which will keep checking the _head_ TID 2945 * in the list to see if it has traffic. If we queue 2946 * a TID to the head of the list and it doesn't transmit, 2947 * we'll check it again. 2948 * 2949 * So, get the rest of this leaking frames support working 2950 * and reliable first and _then_ optimise it so they're 2951 * pushed out in front of any other pending software 2952 * queued nodes. 2953 */ 2954 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2955 } 2956 2957 /* 2958 * Mark the current node as no longer needing to be polled for 2959 * TX packets. 2960 * 2961 * The TXQ lock must be held. 2962 */ 2963 static void 2964 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2965 { 2966 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2967 2968 ATH_TX_LOCK_ASSERT(sc); 2969 2970 if (tid->sched == 0) 2971 return; 2972 2973 tid->sched = 0; 2974 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2975 } 2976 2977 /* 2978 * Assign a sequence number manually to the given frame. 2979 * 2980 * This should only be called for A-MPDU TX frames. 2981 * 2982 * Note: for group addressed frames, the sequence number 2983 * should be from NONQOS_TID, and net80211 should have 2984 * already assigned it for us. 2985 */ 2986 static ieee80211_seq 2987 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2988 struct ath_buf *bf, struct mbuf *m0) 2989 { 2990 struct ieee80211_frame *wh; 2991 int tid; 2992 ieee80211_seq seqno; 2993 uint8_t subtype; 2994 2995 wh = mtod(m0, struct ieee80211_frame *); 2996 tid = ieee80211_gettid(wh); 2997 2998 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n", 2999 __func__, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3000 3001 /* XXX Is it a control frame? Ignore */ 3002 3003 /* Does the packet require a sequence number? */ 3004 if (! IEEE80211_QOS_HAS_SEQ(wh)) 3005 return -1; 3006 3007 ATH_TX_LOCK_ASSERT(sc); 3008 3009 /* 3010 * Is it a QOS NULL Data frame? Give it a sequence number from 3011 * the default TID (IEEE80211_NONQOS_TID.) 3012 * 3013 * The RX path of everything I've looked at doesn't include the NULL 3014 * data frame sequence number in the aggregation state updates, so 3015 * assigning it a sequence number there will cause a BAW hole on the 3016 * RX side. 3017 */ 3018 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3019 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 3020 /* XXX no locking for this TID? This is a bit of a problem. */ 3021 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3022 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3023 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3024 /* 3025 * group addressed frames get a sequence number from 3026 * a different sequence number space. 3027 */ 3028 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3029 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3030 } else { 3031 /* Manually assign sequence number */ 3032 seqno = ni->ni_txseqs[tid]; 3033 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 3034 } 3035 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 3036 M_SEQNO_SET(m0, seqno); 3037 3038 /* Return so caller can do something with it if needed */ 3039 DPRINTF(sc, ATH_DEBUG_SW_TX, 3040 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", 3041 __func__, subtype, tid, seqno); 3042 return seqno; 3043 } 3044 3045 /* 3046 * Attempt to direct dispatch an aggregate frame to hardware. 3047 * If the frame is out of BAW, queue. 3048 * Otherwise, schedule it as a single frame. 3049 */ 3050 static void 3051 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 3052 struct ath_txq *txq, struct ath_buf *bf) 3053 { 3054 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 3055 struct ieee80211_tx_ampdu *tap; 3056 3057 ATH_TX_LOCK_ASSERT(sc); 3058 3059 tap = ath_tx_get_tx_tid(an, tid->tid); 3060 3061 /* paused? queue */ 3062 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 3063 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3064 /* XXX don't sched - we're paused! */ 3065 return; 3066 } 3067 3068 /* outside baw? queue */ 3069 if (bf->bf_state.bfs_dobaw && 3070 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 3071 SEQNO(bf->bf_state.bfs_seqno)))) { 3072 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3073 ath_tx_tid_sched(sc, tid); 3074 return; 3075 } 3076 3077 /* 3078 * This is a temporary check and should be removed once 3079 * all the relevant code paths have been fixed. 3080 * 3081 * During aggregate retries, it's possible that the head 3082 * frame will fail (which has the bfs_aggr and bfs_nframes 3083 * fields set for said aggregate) and will be retried as 3084 * a single frame. In this instance, the values should 3085 * be reset or the completion code will get upset with you. 3086 */ 3087 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 3088 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3089 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 3090 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 3091 bf->bf_state.bfs_aggr = 0; 3092 bf->bf_state.bfs_nframes = 1; 3093 } 3094 3095 /* Update CLRDMASK just before this frame is queued */ 3096 ath_tx_update_clrdmask(sc, tid, bf); 3097 3098 /* Direct dispatch to hardware */ 3099 ath_tx_do_ratelookup(sc, bf, tid->tid, false); 3100 ath_tx_calc_duration(sc, bf); 3101 ath_tx_calc_protection(sc, bf); 3102 ath_tx_set_rtscts(sc, bf); 3103 ath_tx_rate_fill_rcflags(sc, bf); 3104 ath_tx_setds(sc, bf); 3105 3106 /* Statistics */ 3107 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3108 3109 /* Track per-TID hardware queue depth correctly */ 3110 tid->hwq_depth++; 3111 3112 /* Add to BAW */ 3113 if (bf->bf_state.bfs_dobaw) { 3114 ath_tx_addto_baw(sc, an, tid, bf); 3115 bf->bf_state.bfs_addedbaw = 1; 3116 } 3117 3118 /* Set completion handler, multi-frame aggregate or not */ 3119 bf->bf_comp = ath_tx_aggr_comp; 3120 3121 /* 3122 * Update the current leak count if 3123 * we're leaking frames; and set the 3124 * MORE flag as appropriate. 3125 */ 3126 ath_tx_leak_count_update(sc, tid, bf); 3127 3128 /* Hand off to hardware */ 3129 ath_tx_handoff(sc, txq, bf); 3130 } 3131 3132 /* 3133 * Attempt to send the packet. 3134 * If the queue isn't busy, direct-dispatch. 3135 * If the queue is busy enough, queue the given packet on the 3136 * relevant software queue. 3137 */ 3138 void 3139 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, 3140 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) 3141 { 3142 struct ath_node *an = ATH_NODE(ni); 3143 struct ieee80211_frame *wh; 3144 struct ath_tid *atid; 3145 int pri, tid; 3146 struct mbuf *m0 = bf->bf_m; 3147 3148 ATH_TX_LOCK_ASSERT(sc); 3149 3150 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 3151 wh = mtod(m0, struct ieee80211_frame *); 3152 pri = ath_tx_getac(sc, m0); 3153 tid = ath_tx_gettid(sc, m0); 3154 atid = &an->an_tid[tid]; 3155 3156 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 3157 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3158 3159 /* Set local packet state, used to queue packets to hardware */ 3160 /* XXX potentially duplicate info, re-check */ 3161 bf->bf_state.bfs_tid = tid; 3162 bf->bf_state.bfs_tx_queue = txq->axq_qnum; 3163 bf->bf_state.bfs_pri = pri; 3164 3165 /* 3166 * If the hardware queue isn't busy, queue it directly. 3167 * If the hardware queue is busy, queue it. 3168 * If the TID is paused or the traffic it outside BAW, software 3169 * queue it. 3170 * 3171 * If the node is in power-save and we're leaking a frame, 3172 * leak a single frame. 3173 */ 3174 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { 3175 /* TID is paused, queue */ 3176 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3177 /* 3178 * If the caller requested that it be sent at a high 3179 * priority, queue it at the head of the list. 3180 */ 3181 if (queue_to_head) 3182 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3183 else 3184 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3185 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3186 /* AMPDU pending; queue */ 3187 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3188 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3189 /* XXX sched? */ 3190 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3191 /* 3192 * AMPDU running, queue single-frame if the hardware queue 3193 * isn't busy. 3194 * 3195 * If the hardware queue is busy, sending an aggregate frame 3196 * then just hold off so we can queue more aggregate frames. 3197 * 3198 * Otherwise we may end up with single frames leaking through 3199 * because we are dispatching them too quickly. 3200 * 3201 * TODO: maybe we should treat this as two policies - minimise 3202 * latency, or maximise throughput. Then for BE/BK we can 3203 * maximise throughput, and VO/VI (if AMPDU is enabled!) 3204 * minimise latency. 3205 */ 3206 3207 /* 3208 * Always queue the frame to the tail of the list. 3209 */ 3210 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3211 3212 /* 3213 * If the hardware queue isn't busy, direct dispatch 3214 * the head frame in the list. 3215 * 3216 * Note: if we're say, configured to do ADDBA but not A-MPDU 3217 * then maybe we want to still queue two non-aggregate frames 3218 * to the hardware. Again with the per-TID policy 3219 * configuration..) 3220 * 3221 * Otherwise, schedule the TID. 3222 */ 3223 /* XXX TXQ locking */ 3224 if (txq->axq_depth + txq->fifo.axq_depth == 0) { 3225 3226 bf = ATH_TID_FIRST(atid); 3227 ATH_TID_REMOVE(atid, bf, bf_list); 3228 3229 /* 3230 * Ensure it's definitely treated as a non-AMPDU 3231 * frame - this information may have been left 3232 * over from a previous attempt. 3233 */ 3234 bf->bf_state.bfs_aggr = 0; 3235 bf->bf_state.bfs_nframes = 1; 3236 3237 /* Queue to the hardware */ 3238 ath_tx_xmit_aggr(sc, an, txq, bf); 3239 DPRINTF(sc, ATH_DEBUG_SW_TX, 3240 "%s: xmit_aggr\n", 3241 __func__); 3242 } else { 3243 DPRINTF(sc, ATH_DEBUG_SW_TX, 3244 "%s: ampdu; swq'ing\n", 3245 __func__); 3246 3247 ath_tx_tid_sched(sc, atid); 3248 } 3249 /* 3250 * If we're not doing A-MPDU, be prepared to direct dispatch 3251 * up to both limits if possible. This particular corner 3252 * case may end up with packet starvation between aggregate 3253 * traffic and non-aggregate traffic: we want to ensure 3254 * that non-aggregate stations get a few frames queued to the 3255 * hardware before the aggregate station(s) get their chance. 3256 * 3257 * So if you only ever see a couple of frames direct dispatched 3258 * to the hardware from a non-AMPDU client, check both here 3259 * and in the software queue dispatcher to ensure that those 3260 * non-AMPDU stations get a fair chance to transmit. 3261 */ 3262 /* XXX TXQ locking */ 3263 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && 3264 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { 3265 /* AMPDU not running, attempt direct dispatch */ 3266 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 3267 /* See if clrdmask needs to be set */ 3268 ath_tx_update_clrdmask(sc, atid, bf); 3269 3270 /* 3271 * Update the current leak count if 3272 * we're leaking frames; and set the 3273 * MORE flag as appropriate. 3274 */ 3275 ath_tx_leak_count_update(sc, atid, bf); 3276 3277 /* 3278 * Dispatch the frame. 3279 */ 3280 ath_tx_xmit_normal(sc, txq, bf); 3281 } else { 3282 /* Busy; queue */ 3283 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 3284 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3285 ath_tx_tid_sched(sc, atid); 3286 } 3287 } 3288 3289 /* 3290 * Only set the clrdmask bit if none of the nodes are currently 3291 * filtered. 3292 * 3293 * XXX TODO: go through all the callers and check to see 3294 * which are being called in the context of looping over all 3295 * TIDs (eg, if all tids are being paused, resumed, etc.) 3296 * That'll avoid O(n^2) complexity here. 3297 */ 3298 static void 3299 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3300 { 3301 int i; 3302 3303 ATH_TX_LOCK_ASSERT(sc); 3304 3305 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3306 if (an->an_tid[i].isfiltered == 1) 3307 return; 3308 } 3309 an->clrdmask = 1; 3310 } 3311 3312 /* 3313 * Configure the per-TID node state. 3314 * 3315 * This likely belongs in if_ath_node.c but I can't think of anywhere 3316 * else to put it just yet. 3317 * 3318 * This sets up the SLISTs and the mutex as appropriate. 3319 */ 3320 void 3321 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3322 { 3323 int i, j; 3324 struct ath_tid *atid; 3325 3326 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3327 atid = &an->an_tid[i]; 3328 3329 /* XXX now with this bzer(), is the field 0'ing needed? */ 3330 bzero(atid, sizeof(*atid)); 3331 3332 TAILQ_INIT(&atid->tid_q); 3333 TAILQ_INIT(&atid->filtq.tid_q); 3334 atid->tid = i; 3335 atid->an = an; 3336 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3337 atid->tx_buf[j] = NULL; 3338 atid->baw_head = atid->baw_tail = 0; 3339 atid->paused = 0; 3340 atid->sched = 0; 3341 atid->hwq_depth = 0; 3342 atid->cleanup_inprogress = 0; 3343 if (i == IEEE80211_NONQOS_TID) 3344 atid->ac = ATH_NONQOS_TID_AC; 3345 else 3346 atid->ac = TID_TO_WME_AC(i); 3347 } 3348 an->clrdmask = 1; /* Always start by setting this bit */ 3349 } 3350 3351 /* 3352 * Pause the current TID. This stops packets from being transmitted 3353 * on it. 3354 * 3355 * Since this is also called from upper layers as well as the driver, 3356 * it will get the TID lock. 3357 */ 3358 static void 3359 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3360 { 3361 3362 ATH_TX_LOCK_ASSERT(sc); 3363 tid->paused++; 3364 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", 3365 __func__, 3366 tid->an->an_node.ni_macaddr, ":", 3367 tid->tid, 3368 tid->paused); 3369 } 3370 3371 /* 3372 * Unpause the current TID, and schedule it if needed. 3373 */ 3374 static void 3375 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3376 { 3377 ATH_TX_LOCK_ASSERT(sc); 3378 3379 /* 3380 * There's some odd places where ath_tx_tid_resume() is called 3381 * when it shouldn't be; this works around that particular issue 3382 * until it's actually resolved. 3383 */ 3384 if (tid->paused == 0) { 3385 device_printf(sc->sc_dev, 3386 "%s: [%6D]: tid=%d, paused=0?\n", 3387 __func__, 3388 tid->an->an_node.ni_macaddr, ":", 3389 tid->tid); 3390 } else { 3391 tid->paused--; 3392 } 3393 3394 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3395 "%s: [%6D]: tid=%d, unpaused = %d\n", 3396 __func__, 3397 tid->an->an_node.ni_macaddr, ":", 3398 tid->tid, 3399 tid->paused); 3400 3401 if (tid->paused) 3402 return; 3403 3404 /* 3405 * Override the clrdmask configuration for the next frame 3406 * from this TID, just to get the ball rolling. 3407 */ 3408 ath_tx_set_clrdmask(sc, tid->an); 3409 3410 if (tid->axq_depth == 0) 3411 return; 3412 3413 /* XXX isfiltered shouldn't ever be 0 at this point */ 3414 if (tid->isfiltered == 1) { 3415 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3416 __func__); 3417 return; 3418 } 3419 3420 ath_tx_tid_sched(sc, tid); 3421 3422 /* 3423 * Queue the software TX scheduler. 3424 */ 3425 ath_tx_swq_kick(sc); 3426 } 3427 3428 /* 3429 * Add the given ath_buf to the TID filtered frame list. 3430 * This requires the TID be filtered. 3431 */ 3432 static void 3433 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3434 struct ath_buf *bf) 3435 { 3436 3437 ATH_TX_LOCK_ASSERT(sc); 3438 3439 if (!tid->isfiltered) 3440 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3441 __func__); 3442 3443 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3444 3445 /* Set the retry bit and bump the retry counter */ 3446 ath_tx_set_retry(sc, bf); 3447 sc->sc_stats.ast_tx_swfiltered++; 3448 3449 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3450 } 3451 3452 /* 3453 * Handle a completed filtered frame from the given TID. 3454 * This just enables/pauses the filtered frame state if required 3455 * and appends the filtered frame to the filtered queue. 3456 */ 3457 static void 3458 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3459 struct ath_buf *bf) 3460 { 3461 3462 ATH_TX_LOCK_ASSERT(sc); 3463 3464 if (! tid->isfiltered) { 3465 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3466 __func__, tid->tid); 3467 tid->isfiltered = 1; 3468 ath_tx_tid_pause(sc, tid); 3469 } 3470 3471 /* Add the frame to the filter queue */ 3472 ath_tx_tid_filt_addbuf(sc, tid, bf); 3473 } 3474 3475 /* 3476 * Complete the filtered frame TX completion. 3477 * 3478 * If there are no more frames in the hardware queue, unpause/unfilter 3479 * the TID if applicable. Otherwise we will wait for a node PS transition 3480 * to unfilter. 3481 */ 3482 static void 3483 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3484 { 3485 struct ath_buf *bf; 3486 int do_resume = 0; 3487 3488 ATH_TX_LOCK_ASSERT(sc); 3489 3490 if (tid->hwq_depth != 0) 3491 return; 3492 3493 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3494 __func__, tid->tid); 3495 if (tid->isfiltered == 1) { 3496 tid->isfiltered = 0; 3497 do_resume = 1; 3498 } 3499 3500 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3501 ath_tx_set_clrdmask(sc, tid->an); 3502 3503 /* XXX this is really quite inefficient */ 3504 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3505 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3506 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3507 } 3508 3509 /* And only resume if we had paused before */ 3510 if (do_resume) 3511 ath_tx_tid_resume(sc, tid); 3512 } 3513 3514 /* 3515 * Called when a single (aggregate or otherwise) frame is completed. 3516 * 3517 * Returns 0 if the buffer could be added to the filtered list 3518 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3519 * filtered list (failed clone; expired retry) and the caller should 3520 * free it and handle it like a failure (eg by sending a BAR.) 3521 * 3522 * since the buffer may be cloned, bf must be not touched after this 3523 * if the return value is 0. 3524 */ 3525 static int 3526 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3527 struct ath_buf *bf) 3528 { 3529 struct ath_buf *nbf; 3530 int retval; 3531 3532 ATH_TX_LOCK_ASSERT(sc); 3533 3534 /* 3535 * Don't allow a filtered frame to live forever. 3536 */ 3537 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3538 sc->sc_stats.ast_tx_swretrymax++; 3539 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3540 "%s: bf=%p, seqno=%d, exceeded retries\n", 3541 __func__, 3542 bf, 3543 SEQNO(bf->bf_state.bfs_seqno)); 3544 retval = 1; /* error */ 3545 goto finish; 3546 } 3547 3548 /* 3549 * A busy buffer can't be added to the retry list. 3550 * It needs to be cloned. 3551 */ 3552 if (bf->bf_flags & ATH_BUF_BUSY) { 3553 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3554 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3555 "%s: busy buffer clone: %p -> %p\n", 3556 __func__, bf, nbf); 3557 } else { 3558 nbf = bf; 3559 } 3560 3561 if (nbf == NULL) { 3562 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3563 "%s: busy buffer couldn't be cloned (%p)!\n", 3564 __func__, bf); 3565 retval = 1; /* error */ 3566 } else { 3567 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3568 retval = 0; /* ok */ 3569 } 3570 finish: 3571 ath_tx_tid_filt_comp_complete(sc, tid); 3572 3573 return (retval); 3574 } 3575 3576 static void 3577 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3578 struct ath_buf *bf_first, ath_bufhead *bf_q) 3579 { 3580 struct ath_buf *bf, *bf_next, *nbf; 3581 3582 ATH_TX_LOCK_ASSERT(sc); 3583 3584 bf = bf_first; 3585 while (bf) { 3586 bf_next = bf->bf_next; 3587 bf->bf_next = NULL; /* Remove it from the aggr list */ 3588 3589 /* 3590 * Don't allow a filtered frame to live forever. 3591 */ 3592 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3593 sc->sc_stats.ast_tx_swretrymax++; 3594 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3595 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", 3596 __func__, 3597 tid->tid, 3598 bf, 3599 SEQNO(bf->bf_state.bfs_seqno)); 3600 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3601 goto next; 3602 } 3603 3604 if (bf->bf_flags & ATH_BUF_BUSY) { 3605 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3606 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3607 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", 3608 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); 3609 } else { 3610 nbf = bf; 3611 } 3612 3613 /* 3614 * If the buffer couldn't be cloned, add it to bf_q; 3615 * the caller will free the buffer(s) as required. 3616 */ 3617 if (nbf == NULL) { 3618 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3619 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", 3620 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); 3621 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3622 } else { 3623 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3624 } 3625 next: 3626 bf = bf_next; 3627 } 3628 3629 ath_tx_tid_filt_comp_complete(sc, tid); 3630 } 3631 3632 /* 3633 * Suspend the queue because we need to TX a BAR. 3634 */ 3635 static void 3636 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3637 { 3638 3639 ATH_TX_LOCK_ASSERT(sc); 3640 3641 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3642 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3643 __func__, 3644 tid->tid, 3645 tid->bar_wait, 3646 tid->bar_tx); 3647 3648 /* We shouldn't be called when bar_tx is 1 */ 3649 if (tid->bar_tx) { 3650 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3651 "%s: bar_tx is 1?!\n", __func__); 3652 } 3653 3654 /* If we've already been called, just be patient. */ 3655 if (tid->bar_wait) 3656 return; 3657 3658 /* Wait! */ 3659 tid->bar_wait = 1; 3660 3661 /* Only one pause, no matter how many frames fail */ 3662 ath_tx_tid_pause(sc, tid); 3663 } 3664 3665 /* 3666 * We've finished with BAR handling - either we succeeded or 3667 * failed. Either way, unsuspend TX. 3668 */ 3669 static void 3670 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3671 { 3672 3673 ATH_TX_LOCK_ASSERT(sc); 3674 3675 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3676 "%s: %6D: TID=%d, called\n", 3677 __func__, 3678 tid->an->an_node.ni_macaddr, 3679 ":", 3680 tid->tid); 3681 3682 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3683 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3684 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3685 __func__, tid->an->an_node.ni_macaddr, ":", 3686 tid->tid, tid->bar_tx, tid->bar_wait); 3687 } 3688 3689 tid->bar_tx = tid->bar_wait = 0; 3690 ath_tx_tid_resume(sc, tid); 3691 } 3692 3693 /* 3694 * Return whether we're ready to TX a BAR frame. 3695 * 3696 * Requires the TID lock be held. 3697 */ 3698 static int 3699 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3700 { 3701 3702 ATH_TX_LOCK_ASSERT(sc); 3703 3704 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3705 return (0); 3706 3707 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3708 "%s: %6D: TID=%d, bar ready\n", 3709 __func__, 3710 tid->an->an_node.ni_macaddr, 3711 ":", 3712 tid->tid); 3713 3714 return (1); 3715 } 3716 3717 /* 3718 * Check whether the current TID is ready to have a BAR 3719 * TXed and if so, do the TX. 3720 * 3721 * Since the TID/TXQ lock can't be held during a call to 3722 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3723 * sending the BAR and locking it again. 3724 * 3725 * Eventually, the code to send the BAR should be broken out 3726 * from this routine so the lock doesn't have to be reacquired 3727 * just to be immediately dropped by the caller. 3728 */ 3729 static void 3730 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3731 { 3732 struct ieee80211_tx_ampdu *tap; 3733 3734 ATH_TX_LOCK_ASSERT(sc); 3735 3736 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3737 "%s: %6D: TID=%d, called\n", 3738 __func__, 3739 tid->an->an_node.ni_macaddr, 3740 ":", 3741 tid->tid); 3742 3743 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3744 3745 /* 3746 * This is an error condition! 3747 */ 3748 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3749 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3750 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3751 __func__, tid->an->an_node.ni_macaddr, ":", 3752 tid->tid, tid->bar_tx, tid->bar_wait); 3753 return; 3754 } 3755 3756 /* Don't do anything if we still have pending frames */ 3757 if (tid->hwq_depth > 0) { 3758 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3759 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3760 __func__, 3761 tid->an->an_node.ni_macaddr, 3762 ":", 3763 tid->tid, 3764 tid->hwq_depth); 3765 return; 3766 } 3767 3768 /* We're now about to TX */ 3769 tid->bar_tx = 1; 3770 3771 /* 3772 * Override the clrdmask configuration for the next frame, 3773 * just to get the ball rolling. 3774 */ 3775 ath_tx_set_clrdmask(sc, tid->an); 3776 3777 /* 3778 * Calculate new BAW left edge, now that all frames have either 3779 * succeeded or failed. 3780 * 3781 * XXX verify this is _actually_ the valid value to begin at! 3782 */ 3783 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3784 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3785 __func__, 3786 tid->an->an_node.ni_macaddr, 3787 ":", 3788 tid->tid, 3789 tap->txa_start); 3790 3791 /* Try sending the BAR frame */ 3792 /* We can't hold the lock here! */ 3793 3794 ATH_TX_UNLOCK(sc); 3795 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3796 /* Success? Now we wait for notification that it's done */ 3797 ATH_TX_LOCK(sc); 3798 return; 3799 } 3800 3801 /* Failure? For now, warn loudly and continue */ 3802 ATH_TX_LOCK(sc); 3803 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3804 "%s: %6D: TID=%d, failed to TX BAR, continue!\n", 3805 __func__, tid->an->an_node.ni_macaddr, ":", 3806 tid->tid); 3807 ath_tx_tid_bar_unsuspend(sc, tid); 3808 } 3809 3810 static void 3811 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3812 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3813 { 3814 3815 ATH_TX_LOCK_ASSERT(sc); 3816 3817 /* 3818 * If the current TID is running AMPDU, update 3819 * the BAW. 3820 */ 3821 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3822 bf->bf_state.bfs_dobaw) { 3823 /* 3824 * Only remove the frame from the BAW if it's 3825 * been transmitted at least once; this means 3826 * the frame was in the BAW to begin with. 3827 */ 3828 if (bf->bf_state.bfs_retries > 0) { 3829 ath_tx_update_baw(sc, an, tid, bf); 3830 bf->bf_state.bfs_dobaw = 0; 3831 } 3832 #if 0 3833 /* 3834 * This has become a non-fatal error now 3835 */ 3836 if (! bf->bf_state.bfs_addedbaw) 3837 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW 3838 "%s: wasn't added: seqno %d\n", 3839 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3840 #endif 3841 } 3842 3843 /* Strip it out of an aggregate list if it was in one */ 3844 bf->bf_next = NULL; 3845 3846 /* Insert on the free queue to be freed by the caller */ 3847 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3848 } 3849 3850 static void 3851 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3852 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3853 { 3854 struct ieee80211_node *ni = &an->an_node; 3855 struct ath_txq *txq; 3856 struct ieee80211_tx_ampdu *tap; 3857 3858 txq = sc->sc_ac2q[tid->ac]; 3859 tap = ath_tx_get_tx_tid(an, tid->tid); 3860 3861 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3862 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " 3863 "seqno=%d, retry=%d\n", 3864 __func__, 3865 pfx, 3866 ni->ni_macaddr, 3867 ":", 3868 bf, 3869 bf->bf_state.bfs_addedbaw, 3870 bf->bf_state.bfs_dobaw, 3871 SEQNO(bf->bf_state.bfs_seqno), 3872 bf->bf_state.bfs_retries); 3873 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3874 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3875 __func__, 3876 pfx, 3877 ni->ni_macaddr, 3878 ":", 3879 bf, 3880 txq->axq_qnum, 3881 txq->axq_depth, 3882 txq->axq_aggr_depth); 3883 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3884 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " 3885 "isfiltered=%d\n", 3886 __func__, 3887 pfx, 3888 ni->ni_macaddr, 3889 ":", 3890 bf, 3891 tid->axq_depth, 3892 tid->hwq_depth, 3893 tid->bar_wait, 3894 tid->isfiltered); 3895 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3896 "%s: %s: %6D: tid %d: " 3897 "sched=%d, paused=%d, " 3898 "incomp=%d, baw_head=%d, " 3899 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3900 __func__, 3901 pfx, 3902 ni->ni_macaddr, 3903 ":", 3904 tid->tid, 3905 tid->sched, tid->paused, 3906 tid->incomp, tid->baw_head, 3907 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3908 ni->ni_txseqs[tid->tid]); 3909 3910 /* XXX Dump the frame, see what it is? */ 3911 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3912 ieee80211_dump_pkt(ni->ni_ic, 3913 mtod(bf->bf_m, const uint8_t *), 3914 bf->bf_m->m_len, 0, -1); 3915 } 3916 3917 /* 3918 * Free any packets currently pending in the software TX queue. 3919 * 3920 * This will be called when a node is being deleted. 3921 * 3922 * It can also be called on an active node during an interface 3923 * reset or state transition. 3924 * 3925 * (From Linux/reference): 3926 * 3927 * TODO: For frame(s) that are in the retry state, we will reuse the 3928 * sequence number(s) without setting the retry bit. The 3929 * alternative is to give up on these and BAR the receiver's window 3930 * forward. 3931 */ 3932 static void 3933 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3934 struct ath_tid *tid, ath_bufhead *bf_cq) 3935 { 3936 struct ath_buf *bf; 3937 struct ieee80211_tx_ampdu *tap; 3938 struct ieee80211_node *ni = &an->an_node; 3939 int t; 3940 3941 tap = ath_tx_get_tx_tid(an, tid->tid); 3942 3943 ATH_TX_LOCK_ASSERT(sc); 3944 3945 /* Walk the queue, free frames */ 3946 t = 0; 3947 for (;;) { 3948 bf = ATH_TID_FIRST(tid); 3949 if (bf == NULL) { 3950 break; 3951 } 3952 3953 if (t == 0) { 3954 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3955 // t = 1; 3956 } 3957 3958 ATH_TID_REMOVE(tid, bf, bf_list); 3959 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3960 } 3961 3962 /* And now, drain the filtered frame queue */ 3963 t = 0; 3964 for (;;) { 3965 bf = ATH_TID_FILT_FIRST(tid); 3966 if (bf == NULL) 3967 break; 3968 3969 if (t == 0) { 3970 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3971 // t = 1; 3972 } 3973 3974 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3975 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3976 } 3977 3978 /* 3979 * Override the clrdmask configuration for the next frame 3980 * in case there is some future transmission, just to get 3981 * the ball rolling. 3982 * 3983 * This won't hurt things if the TID is about to be freed. 3984 */ 3985 ath_tx_set_clrdmask(sc, tid->an); 3986 3987 /* 3988 * Now that it's completed, grab the TID lock and update 3989 * the sequence number and BAW window. 3990 * Because sequence numbers have been assigned to frames 3991 * that haven't been sent yet, it's entirely possible 3992 * we'll be called with some pending frames that have not 3993 * been transmitted. 3994 * 3995 * The cleaner solution is to do the sequence number allocation 3996 * when the packet is first transmitted - and thus the "retries" 3997 * check above would be enough to update the BAW/seqno. 3998 */ 3999 4000 /* But don't do it for non-QoS TIDs */ 4001 if (tap) { 4002 #if 1 4003 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4004 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", 4005 __func__, 4006 ni->ni_macaddr, 4007 ":", 4008 an, 4009 tid->tid, 4010 tap->txa_start); 4011 #endif 4012 ni->ni_txseqs[tid->tid] = tap->txa_start; 4013 tid->baw_tail = tid->baw_head; 4014 } 4015 } 4016 4017 /* 4018 * Reset the TID state. This must be only called once the node has 4019 * had its frames flushed from this TID, to ensure that no other 4020 * pause / unpause logic can kick in. 4021 */ 4022 static void 4023 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) 4024 { 4025 4026 #if 0 4027 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; 4028 tid->paused = tid->sched = tid->addba_tx_pending = 0; 4029 tid->incomp = tid->cleanup_inprogress = 0; 4030 #endif 4031 4032 /* 4033 * If we have a bar_wait set, we need to unpause the TID 4034 * here. Otherwise once cleanup has finished, the TID won't 4035 * have the right paused counter. 4036 * 4037 * XXX I'm not going through resume here - I don't want the 4038 * node to be rescheuled just yet. This however should be 4039 * methodized! 4040 */ 4041 if (tid->bar_wait) { 4042 if (tid->paused > 0) { 4043 tid->paused --; 4044 } 4045 } 4046 4047 /* 4048 * XXX same with a currently filtered TID. 4049 * 4050 * Since this is being called during a flush, we assume that 4051 * the filtered frame list is actually empty. 4052 * 4053 * XXX TODO: add in a check to ensure that the filtered queue 4054 * depth is actually 0! 4055 */ 4056 if (tid->isfiltered) { 4057 if (tid->paused > 0) { 4058 tid->paused --; 4059 } 4060 } 4061 4062 /* 4063 * Clear BAR, filtered frames, scheduled and ADDBA pending. 4064 * The TID may be going through cleanup from the last association 4065 * where things in the BAW are still in the hardware queue. 4066 */ 4067 tid->bar_wait = 0; 4068 tid->bar_tx = 0; 4069 tid->isfiltered = 0; 4070 tid->sched = 0; 4071 tid->addba_tx_pending = 0; 4072 4073 /* 4074 * XXX TODO: it may just be enough to walk the HWQs and mark 4075 * frames for that node as non-aggregate; or mark the ath_node 4076 * with something that indicates that aggregation is no longer 4077 * occurring. Then we can just toss the BAW complaints and 4078 * do a complete hard reset of state here - no pause, no 4079 * complete counter, etc. 4080 */ 4081 4082 } 4083 4084 /* 4085 * Flush all software queued packets for the given node. 4086 * 4087 * This occurs when a completion handler frees the last buffer 4088 * for a node, and the node is thus freed. This causes the node 4089 * to be cleaned up, which ends up calling ath_tx_node_flush. 4090 */ 4091 void 4092 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 4093 { 4094 int tid; 4095 ath_bufhead bf_cq; 4096 struct ath_buf *bf; 4097 4098 TAILQ_INIT(&bf_cq); 4099 4100 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 4101 &an->an_node); 4102 4103 ATH_TX_LOCK(sc); 4104 DPRINTF(sc, ATH_DEBUG_NODE, 4105 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 4106 "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 4107 __func__, 4108 an->an_node.ni_macaddr, 4109 ":", 4110 an->an_is_powersave, 4111 an->an_stack_psq, 4112 an->an_tim_set, 4113 an->an_swq_depth, 4114 an->clrdmask, 4115 an->an_leak_count); 4116 4117 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 4118 struct ath_tid *atid = &an->an_tid[tid]; 4119 4120 /* Free packets */ 4121 ath_tx_tid_drain(sc, an, atid, &bf_cq); 4122 4123 /* Remove this tid from the list of active tids */ 4124 ath_tx_tid_unsched(sc, atid); 4125 4126 /* Reset the per-TID pause, BAR, etc state */ 4127 ath_tx_tid_reset(sc, atid); 4128 } 4129 4130 /* 4131 * Clear global leak count 4132 */ 4133 an->an_leak_count = 0; 4134 ATH_TX_UNLOCK(sc); 4135 4136 /* Handle completed frames */ 4137 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4138 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4139 ath_tx_default_comp(sc, bf, 0); 4140 } 4141 } 4142 4143 /* 4144 * Drain all the software TXQs currently with traffic queued. 4145 */ 4146 void 4147 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 4148 { 4149 struct ath_tid *tid; 4150 ath_bufhead bf_cq; 4151 struct ath_buf *bf; 4152 4153 TAILQ_INIT(&bf_cq); 4154 ATH_TX_LOCK(sc); 4155 4156 /* 4157 * Iterate over all active tids for the given txq, 4158 * flushing and unsched'ing them 4159 */ 4160 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 4161 tid = TAILQ_FIRST(&txq->axq_tidq); 4162 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4163 ath_tx_tid_unsched(sc, tid); 4164 } 4165 4166 ATH_TX_UNLOCK(sc); 4167 4168 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4169 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4170 ath_tx_default_comp(sc, bf, 0); 4171 } 4172 } 4173 4174 /* 4175 * Handle completion of non-aggregate session frames. 4176 * 4177 * This (currently) doesn't implement software retransmission of 4178 * non-aggregate frames! 4179 * 4180 * Software retransmission of non-aggregate frames needs to obey 4181 * the strict sequence number ordering, and drop any frames that 4182 * will fail this. 4183 * 4184 * For now, filtered frames and frame transmission will cause 4185 * all kinds of issues. So we don't support them. 4186 * 4187 * So anyone queuing frames via ath_tx_normal_xmit() or 4188 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4189 */ 4190 void 4191 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4192 { 4193 struct ieee80211_node *ni = bf->bf_node; 4194 struct ath_node *an = ATH_NODE(ni); 4195 int tid = bf->bf_state.bfs_tid; 4196 struct ath_tid *atid = &an->an_tid[tid]; 4197 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4198 4199 /* The TID state is protected behind the TXQ lock */ 4200 ATH_TX_LOCK(sc); 4201 4202 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4203 __func__, bf, fail, atid->hwq_depth - 1); 4204 4205 atid->hwq_depth--; 4206 4207 #if 0 4208 /* 4209 * If the frame was filtered, stick it on the filter frame 4210 * queue and complain about it. It shouldn't happen! 4211 */ 4212 if ((ts->ts_status & HAL_TXERR_FILT) || 4213 (ts->ts_status != 0 && atid->isfiltered)) { 4214 DPRINTF(sc, ATH_DEBUG_SW_TX, 4215 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4216 __func__, 4217 atid->isfiltered, 4218 ts->ts_status); 4219 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4220 } 4221 #endif 4222 if (atid->isfiltered) 4223 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4224 if (atid->hwq_depth < 0) 4225 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4226 __func__, atid->hwq_depth); 4227 4228 /* If the TID is being cleaned up, track things */ 4229 /* XXX refactor! */ 4230 if (atid->cleanup_inprogress) { 4231 atid->incomp--; 4232 if (atid->incomp == 0) { 4233 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4234 "%s: TID %d: cleaned up! resume!\n", 4235 __func__, tid); 4236 atid->cleanup_inprogress = 0; 4237 ath_tx_tid_resume(sc, atid); 4238 } 4239 } 4240 4241 /* 4242 * If the queue is filtered, potentially mark it as complete 4243 * and reschedule it as needed. 4244 * 4245 * This is required as there may be a subsequent TX descriptor 4246 * for this end-node that has CLRDMASK set, so it's quite possible 4247 * that a filtered frame will be followed by a non-filtered 4248 * (complete or otherwise) frame. 4249 * 4250 * XXX should we do this before we complete the frame? 4251 */ 4252 if (atid->isfiltered) 4253 ath_tx_tid_filt_comp_complete(sc, atid); 4254 ATH_TX_UNLOCK(sc); 4255 4256 /* 4257 * punt to rate control if we're not being cleaned up 4258 * during a hw queue drain and the frame wanted an ACK. 4259 */ 4260 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4261 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4262 ts, bf->bf_state.bfs_pktlen, 4263 1, (ts->ts_status == 0) ? 0 : 1); 4264 4265 ath_tx_default_comp(sc, bf, fail); 4266 } 4267 4268 /* 4269 * Handle cleanup of aggregate session packets that aren't 4270 * an A-MPDU. 4271 * 4272 * There's no need to update the BAW here - the session is being 4273 * torn down. 4274 */ 4275 static void 4276 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4277 { 4278 struct ieee80211_node *ni = bf->bf_node; 4279 struct ath_node *an = ATH_NODE(ni); 4280 int tid = bf->bf_state.bfs_tid; 4281 struct ath_tid *atid = &an->an_tid[tid]; 4282 4283 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4284 __func__, tid, atid->incomp); 4285 4286 ATH_TX_LOCK(sc); 4287 atid->incomp--; 4288 4289 /* XXX refactor! */ 4290 if (bf->bf_state.bfs_dobaw) { 4291 ath_tx_update_baw(sc, an, atid, bf); 4292 if (!bf->bf_state.bfs_addedbaw) 4293 DPRINTF(sc, ATH_DEBUG_SW_TX, 4294 "%s: wasn't added: seqno %d\n", 4295 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4296 } 4297 4298 if (atid->incomp == 0) { 4299 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4300 "%s: TID %d: cleaned up! resume!\n", 4301 __func__, tid); 4302 atid->cleanup_inprogress = 0; 4303 ath_tx_tid_resume(sc, atid); 4304 } 4305 ATH_TX_UNLOCK(sc); 4306 4307 ath_tx_default_comp(sc, bf, 0); 4308 } 4309 4310 4311 /* 4312 * This as it currently stands is a bit dumb. Ideally we'd just 4313 * fail the frame the normal way and have it permanently fail 4314 * via the normal aggregate completion path. 4315 */ 4316 static void 4317 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4318 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4319 { 4320 struct ath_tid *atid = &an->an_tid[tid]; 4321 struct ath_buf *bf, *bf_next; 4322 4323 ATH_TX_LOCK_ASSERT(sc); 4324 4325 /* 4326 * Remove this frame from the queue. 4327 */ 4328 ATH_TID_REMOVE(atid, bf_head, bf_list); 4329 4330 /* 4331 * Loop over all the frames in the aggregate. 4332 */ 4333 bf = bf_head; 4334 while (bf != NULL) { 4335 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4336 4337 /* 4338 * If it's been added to the BAW we need to kick 4339 * it out of the BAW before we continue. 4340 * 4341 * XXX if it's an aggregate, assert that it's in the 4342 * BAW - we shouldn't have it be in an aggregate 4343 * otherwise! 4344 */ 4345 if (bf->bf_state.bfs_addedbaw) { 4346 ath_tx_update_baw(sc, an, atid, bf); 4347 bf->bf_state.bfs_dobaw = 0; 4348 } 4349 4350 /* 4351 * Give it the default completion handler. 4352 */ 4353 bf->bf_comp = ath_tx_normal_comp; 4354 bf->bf_next = NULL; 4355 4356 /* 4357 * Add it to the list to free. 4358 */ 4359 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4360 4361 /* 4362 * Now advance to the next frame in the aggregate. 4363 */ 4364 bf = bf_next; 4365 } 4366 } 4367 4368 /* 4369 * Performs transmit side cleanup when TID changes from aggregated to 4370 * unaggregated and during reassociation. 4371 * 4372 * For now, this just tosses everything from the TID software queue 4373 * whether or not it has been retried and marks the TID as 4374 * pending completion if there's anything for this TID queued to 4375 * the hardware. 4376 * 4377 * The caller is responsible for pausing the TID and unpausing the 4378 * TID if no cleanup was required. Otherwise the cleanup path will 4379 * unpause the TID once the last hardware queued frame is completed. 4380 */ 4381 static void 4382 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4383 ath_bufhead *bf_cq) 4384 { 4385 struct ath_tid *atid = &an->an_tid[tid]; 4386 struct ath_buf *bf, *bf_next; 4387 4388 ATH_TX_LOCK_ASSERT(sc); 4389 4390 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4391 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4392 atid->cleanup_inprogress); 4393 4394 /* 4395 * Move the filtered frames to the TX queue, before 4396 * we run off and discard/process things. 4397 */ 4398 4399 /* XXX this is really quite inefficient */ 4400 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4401 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4402 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4403 } 4404 4405 /* 4406 * Update the frames in the software TX queue: 4407 * 4408 * + Discard retry frames in the queue 4409 * + Fix the completion function to be non-aggregate 4410 */ 4411 bf = ATH_TID_FIRST(atid); 4412 while (bf) { 4413 /* 4414 * Grab the next frame in the list, we may 4415 * be fiddling with the list. 4416 */ 4417 bf_next = TAILQ_NEXT(bf, bf_list); 4418 4419 /* 4420 * Free the frame and all subframes. 4421 */ 4422 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4423 4424 /* 4425 * Next frame! 4426 */ 4427 bf = bf_next; 4428 } 4429 4430 /* 4431 * If there's anything in the hardware queue we wait 4432 * for the TID HWQ to empty. 4433 */ 4434 if (atid->hwq_depth > 0) { 4435 /* 4436 * XXX how about we kill atid->incomp, and instead 4437 * replace it with a macro that checks that atid->hwq_depth 4438 * is 0? 4439 */ 4440 atid->incomp = atid->hwq_depth; 4441 atid->cleanup_inprogress = 1; 4442 } 4443 4444 if (atid->cleanup_inprogress) 4445 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4446 "%s: TID %d: cleanup needed: %d packets\n", 4447 __func__, tid, atid->incomp); 4448 4449 /* Owner now must free completed frames */ 4450 } 4451 4452 static struct ath_buf * 4453 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4454 struct ath_tid *tid, struct ath_buf *bf) 4455 { 4456 struct ath_buf *nbf; 4457 int error; 4458 4459 /* 4460 * Clone the buffer. This will handle the dma unmap and 4461 * copy the node reference to the new buffer. If this 4462 * works out, 'bf' will have no DMA mapping, no mbuf 4463 * pointer and no node reference. 4464 */ 4465 nbf = ath_buf_clone(sc, bf); 4466 4467 #if 0 4468 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4469 __func__); 4470 #endif 4471 4472 if (nbf == NULL) { 4473 /* Failed to clone */ 4474 DPRINTF(sc, ATH_DEBUG_XMIT, 4475 "%s: failed to clone a busy buffer\n", 4476 __func__); 4477 return NULL; 4478 } 4479 4480 /* Setup the dma for the new buffer */ 4481 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4482 if (error != 0) { 4483 DPRINTF(sc, ATH_DEBUG_XMIT, 4484 "%s: failed to setup dma for clone\n", 4485 __func__); 4486 /* 4487 * Put this at the head of the list, not tail; 4488 * that way it doesn't interfere with the 4489 * busy buffer logic (which uses the tail of 4490 * the list.) 4491 */ 4492 ATH_TXBUF_LOCK(sc); 4493 ath_returnbuf_head(sc, nbf); 4494 ATH_TXBUF_UNLOCK(sc); 4495 return NULL; 4496 } 4497 4498 /* Update BAW if required, before we free the original buf */ 4499 if (bf->bf_state.bfs_dobaw) 4500 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4501 4502 /* Free original buffer; return new buffer */ 4503 ath_freebuf(sc, bf); 4504 4505 return nbf; 4506 } 4507 4508 /* 4509 * Handle retrying an unaggregate frame in an aggregate 4510 * session. 4511 * 4512 * If too many retries occur, pause the TID, wait for 4513 * any further retransmits (as there's no reason why 4514 * non-aggregate frames in an aggregate session are 4515 * transmitted in-order; they just have to be in-BAW) 4516 * and then queue a BAR. 4517 */ 4518 static void 4519 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4520 { 4521 struct ieee80211_node *ni = bf->bf_node; 4522 struct ath_node *an = ATH_NODE(ni); 4523 int tid = bf->bf_state.bfs_tid; 4524 struct ath_tid *atid = &an->an_tid[tid]; 4525 struct ieee80211_tx_ampdu *tap; 4526 4527 ATH_TX_LOCK(sc); 4528 4529 tap = ath_tx_get_tx_tid(an, tid); 4530 4531 /* 4532 * If the buffer is marked as busy, we can't directly 4533 * reuse it. Instead, try to clone the buffer. 4534 * If the clone is successful, recycle the old buffer. 4535 * If the clone is unsuccessful, set bfs_retries to max 4536 * to force the next bit of code to free the buffer 4537 * for us. 4538 */ 4539 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4540 (bf->bf_flags & ATH_BUF_BUSY)) { 4541 struct ath_buf *nbf; 4542 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4543 if (nbf) 4544 /* bf has been freed at this point */ 4545 bf = nbf; 4546 else 4547 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4548 } 4549 4550 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4551 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4552 "%s: exceeded retries; seqno %d\n", 4553 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4554 sc->sc_stats.ast_tx_swretrymax++; 4555 4556 /* Update BAW anyway */ 4557 if (bf->bf_state.bfs_dobaw) { 4558 ath_tx_update_baw(sc, an, atid, bf); 4559 if (! bf->bf_state.bfs_addedbaw) 4560 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4561 "%s: wasn't added: seqno %d\n", 4562 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4563 } 4564 bf->bf_state.bfs_dobaw = 0; 4565 4566 /* Suspend the TX queue and get ready to send the BAR */ 4567 ath_tx_tid_bar_suspend(sc, atid); 4568 4569 /* Send the BAR if there are no other frames waiting */ 4570 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4571 ath_tx_tid_bar_tx(sc, atid); 4572 4573 ATH_TX_UNLOCK(sc); 4574 4575 /* Free buffer, bf is free after this call */ 4576 ath_tx_default_comp(sc, bf, 0); 4577 return; 4578 } 4579 4580 /* 4581 * This increments the retry counter as well as 4582 * sets the retry flag in the ath_buf and packet 4583 * body. 4584 */ 4585 ath_tx_set_retry(sc, bf); 4586 sc->sc_stats.ast_tx_swretries++; 4587 4588 /* 4589 * Insert this at the head of the queue, so it's 4590 * retried before any current/subsequent frames. 4591 */ 4592 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4593 ath_tx_tid_sched(sc, atid); 4594 /* Send the BAR if there are no other frames waiting */ 4595 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4596 ath_tx_tid_bar_tx(sc, atid); 4597 4598 ATH_TX_UNLOCK(sc); 4599 } 4600 4601 /* 4602 * Common code for aggregate excessive retry/subframe retry. 4603 * If retrying, queues buffers to bf_q. If not, frees the 4604 * buffers. 4605 * 4606 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4607 */ 4608 static int 4609 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4610 ath_bufhead *bf_q) 4611 { 4612 struct ieee80211_node *ni = bf->bf_node; 4613 struct ath_node *an = ATH_NODE(ni); 4614 int tid = bf->bf_state.bfs_tid; 4615 struct ath_tid *atid = &an->an_tid[tid]; 4616 4617 ATH_TX_LOCK_ASSERT(sc); 4618 4619 /* XXX clr11naggr should be done for all subframes */ 4620 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4621 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4622 4623 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4624 4625 /* 4626 * If the buffer is marked as busy, we can't directly 4627 * reuse it. Instead, try to clone the buffer. 4628 * If the clone is successful, recycle the old buffer. 4629 * If the clone is unsuccessful, set bfs_retries to max 4630 * to force the next bit of code to free the buffer 4631 * for us. 4632 */ 4633 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4634 (bf->bf_flags & ATH_BUF_BUSY)) { 4635 struct ath_buf *nbf; 4636 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4637 if (nbf) 4638 /* bf has been freed at this point */ 4639 bf = nbf; 4640 else 4641 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4642 } 4643 4644 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4645 sc->sc_stats.ast_tx_swretrymax++; 4646 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4647 "%s: max retries: seqno %d\n", 4648 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4649 ath_tx_update_baw(sc, an, atid, bf); 4650 if (!bf->bf_state.bfs_addedbaw) 4651 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4652 "%s: wasn't added: seqno %d\n", 4653 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4654 bf->bf_state.bfs_dobaw = 0; 4655 return 1; 4656 } 4657 4658 ath_tx_set_retry(sc, bf); 4659 sc->sc_stats.ast_tx_swretries++; 4660 bf->bf_next = NULL; /* Just to make sure */ 4661 4662 /* Clear the aggregate state */ 4663 bf->bf_state.bfs_aggr = 0; 4664 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4665 bf->bf_state.bfs_nframes = 1; 4666 4667 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4668 return 0; 4669 } 4670 4671 /* 4672 * error pkt completion for an aggregate destination 4673 */ 4674 static void 4675 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4676 struct ath_tid *tid) 4677 { 4678 struct ieee80211_node *ni = bf_first->bf_node; 4679 struct ath_node *an = ATH_NODE(ni); 4680 struct ath_buf *bf_next, *bf; 4681 ath_bufhead bf_q; 4682 int drops = 0; 4683 struct ieee80211_tx_ampdu *tap; 4684 ath_bufhead bf_cq; 4685 4686 TAILQ_INIT(&bf_q); 4687 TAILQ_INIT(&bf_cq); 4688 4689 /* 4690 * Update rate control - all frames have failed. 4691 * 4692 * XXX use the length in the first frame in the series; 4693 * XXX just so things are consistent for now. 4694 * 4695 * XXX TODO: need to signal this is a large frame no matter what... 4696 */ 4697 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4698 &bf_first->bf_status.ds_txstat, 4699 bf_first->bf_state.bfs_pktlen, 4700 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4701 4702 ATH_TX_LOCK(sc); 4703 tap = ath_tx_get_tx_tid(an, tid->tid); 4704 sc->sc_stats.ast_tx_aggr_failall++; 4705 4706 /* Retry all subframes */ 4707 bf = bf_first; 4708 while (bf) { 4709 bf_next = bf->bf_next; 4710 bf->bf_next = NULL; /* Remove it from the aggr list */ 4711 sc->sc_stats.ast_tx_aggr_fail++; 4712 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4713 drops++; 4714 bf->bf_next = NULL; 4715 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4716 } 4717 bf = bf_next; 4718 } 4719 4720 /* Prepend all frames to the beginning of the queue */ 4721 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4722 TAILQ_REMOVE(&bf_q, bf, bf_list); 4723 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4724 } 4725 4726 /* 4727 * Schedule the TID to be re-tried. 4728 */ 4729 ath_tx_tid_sched(sc, tid); 4730 4731 /* 4732 * send bar if we dropped any frames 4733 * 4734 * Keep the txq lock held for now, as we need to ensure 4735 * that ni_txseqs[] is consistent (as it's being updated 4736 * in the ifnet TX context or raw TX context.) 4737 */ 4738 if (drops) { 4739 /* Suspend the TX queue and get ready to send the BAR */ 4740 ath_tx_tid_bar_suspend(sc, tid); 4741 } 4742 4743 /* 4744 * Send BAR if required 4745 */ 4746 if (ath_tx_tid_bar_tx_ready(sc, tid)) 4747 ath_tx_tid_bar_tx(sc, tid); 4748 4749 ATH_TX_UNLOCK(sc); 4750 4751 /* Complete frames which errored out */ 4752 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4753 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4754 ath_tx_default_comp(sc, bf, 0); 4755 } 4756 } 4757 4758 /* 4759 * Handle clean-up of packets from an aggregate list. 4760 * 4761 * There's no need to update the BAW here - the session is being 4762 * torn down. 4763 */ 4764 static void 4765 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 4766 { 4767 struct ath_buf *bf, *bf_next; 4768 struct ieee80211_node *ni = bf_first->bf_node; 4769 struct ath_node *an = ATH_NODE(ni); 4770 int tid = bf_first->bf_state.bfs_tid; 4771 struct ath_tid *atid = &an->an_tid[tid]; 4772 4773 ATH_TX_LOCK(sc); 4774 4775 /* update incomp */ 4776 atid->incomp--; 4777 4778 /* Update the BAW */ 4779 bf = bf_first; 4780 while (bf) { 4781 /* XXX refactor! */ 4782 if (bf->bf_state.bfs_dobaw) { 4783 ath_tx_update_baw(sc, an, atid, bf); 4784 if (!bf->bf_state.bfs_addedbaw) 4785 DPRINTF(sc, ATH_DEBUG_SW_TX, 4786 "%s: wasn't added: seqno %d\n", 4787 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4788 } 4789 bf = bf->bf_next; 4790 } 4791 4792 if (atid->incomp == 0) { 4793 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4794 "%s: TID %d: cleaned up! resume!\n", 4795 __func__, tid); 4796 atid->cleanup_inprogress = 0; 4797 ath_tx_tid_resume(sc, atid); 4798 } 4799 4800 /* Send BAR if required */ 4801 /* XXX why would we send a BAR when transitioning to non-aggregation? */ 4802 /* 4803 * XXX TODO: we should likely just tear down the BAR state here, 4804 * rather than sending a BAR. 4805 */ 4806 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4807 ath_tx_tid_bar_tx(sc, atid); 4808 4809 ATH_TX_UNLOCK(sc); 4810 4811 /* Handle frame completion as individual frames */ 4812 bf = bf_first; 4813 while (bf) { 4814 bf_next = bf->bf_next; 4815 bf->bf_next = NULL; 4816 ath_tx_default_comp(sc, bf, 1); 4817 bf = bf_next; 4818 } 4819 } 4820 4821 /* 4822 * Handle completion of an set of aggregate frames. 4823 * 4824 * Note: the completion handler is the last descriptor in the aggregate, 4825 * not the last descriptor in the first frame. 4826 */ 4827 static void 4828 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4829 int fail) 4830 { 4831 //struct ath_desc *ds = bf->bf_lastds; 4832 struct ieee80211_node *ni = bf_first->bf_node; 4833 struct ath_node *an = ATH_NODE(ni); 4834 int tid = bf_first->bf_state.bfs_tid; 4835 struct ath_tid *atid = &an->an_tid[tid]; 4836 struct ath_tx_status ts; 4837 struct ieee80211_tx_ampdu *tap; 4838 ath_bufhead bf_q; 4839 ath_bufhead bf_cq; 4840 int seq_st, tx_ok; 4841 int hasba, isaggr; 4842 uint32_t ba[2]; 4843 struct ath_buf *bf, *bf_next; 4844 int ba_index; 4845 int drops = 0; 4846 int nframes = 0, nbad = 0, nf; 4847 int pktlen; 4848 /* XXX there's too much on the stack? */ 4849 struct ath_rc_series rc[ATH_RC_NUM]; 4850 int txseq; 4851 4852 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4853 __func__, atid->hwq_depth); 4854 4855 /* 4856 * Take a copy; this may be needed -after- bf_first 4857 * has been completed and freed. 4858 */ 4859 ts = bf_first->bf_status.ds_txstat; 4860 4861 TAILQ_INIT(&bf_q); 4862 TAILQ_INIT(&bf_cq); 4863 4864 /* The TID state is kept behind the TXQ lock */ 4865 ATH_TX_LOCK(sc); 4866 4867 atid->hwq_depth--; 4868 if (atid->hwq_depth < 0) 4869 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", 4870 __func__, atid->hwq_depth); 4871 4872 /* 4873 * If the TID is filtered, handle completing the filter 4874 * transition before potentially kicking it to the cleanup 4875 * function. 4876 * 4877 * XXX this is duplicate work, ew. 4878 */ 4879 if (atid->isfiltered) 4880 ath_tx_tid_filt_comp_complete(sc, atid); 4881 4882 /* 4883 * Punt cleanup to the relevant function, not our problem now 4884 */ 4885 if (atid->cleanup_inprogress) { 4886 if (atid->isfiltered) 4887 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4888 "%s: isfiltered=1, normal_comp?\n", 4889 __func__); 4890 ATH_TX_UNLOCK(sc); 4891 ath_tx_comp_cleanup_aggr(sc, bf_first); 4892 return; 4893 } 4894 4895 /* 4896 * If the frame is filtered, transition to filtered frame 4897 * mode and add this to the filtered frame list. 4898 * 4899 * XXX TODO: figure out how this interoperates with 4900 * BAR, pause and cleanup states. 4901 */ 4902 if ((ts.ts_status & HAL_TXERR_FILT) || 4903 (ts.ts_status != 0 && atid->isfiltered)) { 4904 if (fail != 0) 4905 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4906 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4907 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4908 4909 /* Remove from BAW */ 4910 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4911 if (bf->bf_state.bfs_addedbaw) 4912 drops++; 4913 if (bf->bf_state.bfs_dobaw) { 4914 ath_tx_update_baw(sc, an, atid, bf); 4915 if (!bf->bf_state.bfs_addedbaw) 4916 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4917 "%s: wasn't added: seqno %d\n", 4918 __func__, 4919 SEQNO(bf->bf_state.bfs_seqno)); 4920 } 4921 bf->bf_state.bfs_dobaw = 0; 4922 } 4923 /* 4924 * If any intermediate frames in the BAW were dropped when 4925 * handling filtering things, send a BAR. 4926 */ 4927 if (drops) 4928 ath_tx_tid_bar_suspend(sc, atid); 4929 4930 /* 4931 * Finish up by sending a BAR if required and freeing 4932 * the frames outside of the TX lock. 4933 */ 4934 goto finish_send_bar; 4935 } 4936 4937 /* 4938 * XXX for now, use the first frame in the aggregate for 4939 * XXX rate control completion; it's at least consistent. 4940 */ 4941 pktlen = bf_first->bf_state.bfs_pktlen; 4942 4943 /* 4944 * Handle errors first! 4945 * 4946 * Here, handle _any_ error as a "exceeded retries" error. 4947 * Later on (when filtered frames are to be specially handled) 4948 * it'll have to be expanded. 4949 */ 4950 #if 0 4951 if (ts.ts_status & HAL_TXERR_XRETRY) { 4952 #endif 4953 if (ts.ts_status != 0) { 4954 ATH_TX_UNLOCK(sc); 4955 ath_tx_comp_aggr_error(sc, bf_first, atid); 4956 return; 4957 } 4958 4959 tap = ath_tx_get_tx_tid(an, tid); 4960 4961 /* 4962 * extract starting sequence and block-ack bitmap 4963 */ 4964 /* XXX endian-ness of seq_st, ba? */ 4965 seq_st = ts.ts_seqnum; 4966 hasba = !! (ts.ts_flags & HAL_TX_BA); 4967 tx_ok = (ts.ts_status == 0); 4968 isaggr = bf_first->bf_state.bfs_aggr; 4969 ba[0] = ts.ts_ba_low; 4970 ba[1] = ts.ts_ba_high; 4971 4972 /* 4973 * Copy the TX completion status and the rate control 4974 * series from the first descriptor, as it may be freed 4975 * before the rate control code can get its grubby fingers 4976 * into things. 4977 */ 4978 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4979 4980 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4981 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4982 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4983 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4984 isaggr, seq_st, hasba, ba[0], ba[1]); 4985 4986 /* 4987 * The reference driver doesn't do this; it simply ignores 4988 * this check in its entirety. 4989 * 4990 * I've seen this occur when using iperf to send traffic 4991 * out tid 1 - the aggregate frames are all marked as TID 1, 4992 * but the TXSTATUS has TID=0. So, let's just ignore this 4993 * check. 4994 */ 4995 #if 0 4996 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 4997 if (tid != ts.ts_tid) { 4998 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 4999 __func__, tid, ts.ts_tid); 5000 tx_ok = 0; 5001 } 5002 #endif 5003 5004 /* AR5416 BA bug; this requires an interface reset */ 5005 if (isaggr && tx_ok && (! hasba)) { 5006 device_printf(sc->sc_dev, 5007 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 5008 "seq_st=%d\n", 5009 __func__, hasba, tx_ok, isaggr, seq_st); 5010 /* XXX TODO: schedule an interface reset */ 5011 #ifdef ATH_DEBUG 5012 ath_printtxbuf(sc, bf_first, 5013 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 5014 #endif 5015 } 5016 5017 /* 5018 * Walk the list of frames, figure out which ones were correctly 5019 * sent and which weren't. 5020 */ 5021 bf = bf_first; 5022 nf = bf_first->bf_state.bfs_nframes; 5023 5024 /* bf_first is going to be invalid once this list is walked */ 5025 bf_first = NULL; 5026 5027 /* 5028 * Walk the list of completed frames and determine 5029 * which need to be completed and which need to be 5030 * retransmitted. 5031 * 5032 * For completed frames, the completion functions need 5033 * to be called at the end of this function as the last 5034 * node reference may free the node. 5035 * 5036 * Finally, since the TXQ lock can't be held during the 5037 * completion callback (to avoid lock recursion), 5038 * the completion calls have to be done outside of the 5039 * lock. 5040 */ 5041 while (bf) { 5042 nframes++; 5043 ba_index = ATH_BA_INDEX(seq_st, 5044 SEQNO(bf->bf_state.bfs_seqno)); 5045 bf_next = bf->bf_next; 5046 bf->bf_next = NULL; /* Remove it from the aggr list */ 5047 5048 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5049 "%s: checking bf=%p seqno=%d; ack=%d\n", 5050 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 5051 ATH_BA_ISSET(ba, ba_index)); 5052 5053 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 5054 sc->sc_stats.ast_tx_aggr_ok++; 5055 ath_tx_update_baw(sc, an, atid, bf); 5056 bf->bf_state.bfs_dobaw = 0; 5057 if (!bf->bf_state.bfs_addedbaw) 5058 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5059 "%s: wasn't added: seqno %d\n", 5060 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5061 bf->bf_next = NULL; 5062 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5063 } else { 5064 sc->sc_stats.ast_tx_aggr_fail++; 5065 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 5066 drops++; 5067 bf->bf_next = NULL; 5068 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5069 } 5070 nbad++; 5071 } 5072 bf = bf_next; 5073 } 5074 5075 /* 5076 * Now that the BAW updates have been done, unlock 5077 * 5078 * txseq is grabbed before the lock is released so we 5079 * have a consistent view of what -was- in the BAW. 5080 * Anything after this point will not yet have been 5081 * TXed. 5082 */ 5083 txseq = tap->txa_start; 5084 ATH_TX_UNLOCK(sc); 5085 5086 if (nframes != nf) 5087 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5088 "%s: num frames seen=%d; bf nframes=%d\n", 5089 __func__, nframes, nf); 5090 5091 /* 5092 * Now we know how many frames were bad, call the rate 5093 * control code. 5094 */ 5095 if (fail == 0) { 5096 /* XXX TODO: what's pktlen here? */ 5097 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 5098 nbad); 5099 } 5100 5101 /* 5102 * send bar if we dropped any frames 5103 */ 5104 if (drops) { 5105 /* Suspend the TX queue and get ready to send the BAR */ 5106 ATH_TX_LOCK(sc); 5107 ath_tx_tid_bar_suspend(sc, atid); 5108 ATH_TX_UNLOCK(sc); 5109 } 5110 5111 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5112 "%s: txa_start now %d\n", __func__, tap->txa_start); 5113 5114 ATH_TX_LOCK(sc); 5115 5116 /* Prepend all frames to the beginning of the queue */ 5117 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5118 TAILQ_REMOVE(&bf_q, bf, bf_list); 5119 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5120 } 5121 5122 /* 5123 * Reschedule to grab some further frames. 5124 */ 5125 ath_tx_tid_sched(sc, atid); 5126 5127 /* 5128 * If the queue is filtered, re-schedule as required. 5129 * 5130 * This is required as there may be a subsequent TX descriptor 5131 * for this end-node that has CLRDMASK set, so it's quite possible 5132 * that a filtered frame will be followed by a non-filtered 5133 * (complete or otherwise) frame. 5134 * 5135 * XXX should we do this before we complete the frame? 5136 */ 5137 if (atid->isfiltered) 5138 ath_tx_tid_filt_comp_complete(sc, atid); 5139 5140 finish_send_bar: 5141 5142 /* 5143 * Send BAR if required 5144 */ 5145 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5146 ath_tx_tid_bar_tx(sc, atid); 5147 5148 ATH_TX_UNLOCK(sc); 5149 5150 /* Do deferred completion */ 5151 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5152 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5153 ath_tx_default_comp(sc, bf, 0); 5154 } 5155 } 5156 5157 /* 5158 * Handle completion of unaggregated frames in an ADDBA 5159 * session. 5160 * 5161 * Fail is set to 1 if the entry is being freed via a call to 5162 * ath_tx_draintxq(). 5163 */ 5164 static void 5165 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5166 { 5167 struct ieee80211_node *ni = bf->bf_node; 5168 struct ath_node *an = ATH_NODE(ni); 5169 int tid = bf->bf_state.bfs_tid; 5170 struct ath_tid *atid = &an->an_tid[tid]; 5171 struct ath_tx_status ts; 5172 int drops = 0; 5173 5174 /* 5175 * Take a copy of this; filtering/cloning the frame may free the 5176 * bf pointer. 5177 */ 5178 ts = bf->bf_status.ds_txstat; 5179 5180 /* 5181 * Update rate control status here, before we possibly 5182 * punt to retry or cleanup. 5183 * 5184 * Do it outside of the TXQ lock. 5185 */ 5186 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5187 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5188 &bf->bf_status.ds_txstat, 5189 bf->bf_state.bfs_pktlen, 5190 1, (ts.ts_status == 0) ? 0 : 1); 5191 5192 /* 5193 * This is called early so atid->hwq_depth can be tracked. 5194 * This unfortunately means that it's released and regrabbed 5195 * during retry and cleanup. That's rather inefficient. 5196 */ 5197 ATH_TX_LOCK(sc); 5198 5199 if (tid == IEEE80211_NONQOS_TID) 5200 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5201 5202 DPRINTF(sc, ATH_DEBUG_SW_TX, 5203 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5204 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5205 SEQNO(bf->bf_state.bfs_seqno)); 5206 5207 atid->hwq_depth--; 5208 if (atid->hwq_depth < 0) 5209 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5210 __func__, atid->hwq_depth); 5211 5212 /* 5213 * If the TID is filtered, handle completing the filter 5214 * transition before potentially kicking it to the cleanup 5215 * function. 5216 */ 5217 if (atid->isfiltered) 5218 ath_tx_tid_filt_comp_complete(sc, atid); 5219 5220 /* 5221 * If a cleanup is in progress, punt to comp_cleanup; 5222 * rather than handling it here. It's thus their 5223 * responsibility to clean up, call the completion 5224 * function in net80211, etc. 5225 */ 5226 if (atid->cleanup_inprogress) { 5227 if (atid->isfiltered) 5228 DPRINTF(sc, ATH_DEBUG_SW_TX, 5229 "%s: isfiltered=1, normal_comp?\n", 5230 __func__); 5231 ATH_TX_UNLOCK(sc); 5232 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5233 __func__); 5234 ath_tx_comp_cleanup_unaggr(sc, bf); 5235 return; 5236 } 5237 5238 /* 5239 * XXX TODO: how does cleanup, BAR and filtered frame handling 5240 * overlap? 5241 * 5242 * If the frame is filtered OR if it's any failure but 5243 * the TID is filtered, the frame must be added to the 5244 * filtered frame list. 5245 * 5246 * However - a busy buffer can't be added to the filtered 5247 * list as it will end up being recycled without having 5248 * been made available for the hardware. 5249 */ 5250 if ((ts.ts_status & HAL_TXERR_FILT) || 5251 (ts.ts_status != 0 && atid->isfiltered)) { 5252 int freeframe; 5253 5254 if (fail != 0) 5255 DPRINTF(sc, ATH_DEBUG_SW_TX, 5256 "%s: isfiltered=1, fail=%d\n", 5257 __func__, fail); 5258 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5259 /* 5260 * If freeframe=0 then bf is no longer ours; don't 5261 * touch it. 5262 */ 5263 if (freeframe) { 5264 /* Remove from BAW */ 5265 if (bf->bf_state.bfs_addedbaw) 5266 drops++; 5267 if (bf->bf_state.bfs_dobaw) { 5268 ath_tx_update_baw(sc, an, atid, bf); 5269 if (!bf->bf_state.bfs_addedbaw) 5270 DPRINTF(sc, ATH_DEBUG_SW_TX, 5271 "%s: wasn't added: seqno %d\n", 5272 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5273 } 5274 bf->bf_state.bfs_dobaw = 0; 5275 } 5276 5277 /* 5278 * If the frame couldn't be filtered, treat it as a drop and 5279 * prepare to send a BAR. 5280 */ 5281 if (freeframe && drops) 5282 ath_tx_tid_bar_suspend(sc, atid); 5283 5284 /* 5285 * Send BAR if required 5286 */ 5287 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5288 ath_tx_tid_bar_tx(sc, atid); 5289 5290 ATH_TX_UNLOCK(sc); 5291 /* 5292 * If freeframe is set, then the frame couldn't be 5293 * cloned and bf is still valid. Just complete/free it. 5294 */ 5295 if (freeframe) 5296 ath_tx_default_comp(sc, bf, fail); 5297 5298 return; 5299 } 5300 /* 5301 * Don't bother with the retry check if all frames 5302 * are being failed (eg during queue deletion.) 5303 */ 5304 #if 0 5305 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5306 #endif 5307 if (fail == 0 && ts.ts_status != 0) { 5308 ATH_TX_UNLOCK(sc); 5309 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5310 __func__); 5311 ath_tx_aggr_retry_unaggr(sc, bf); 5312 return; 5313 } 5314 5315 /* Success? Complete */ 5316 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 5317 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 5318 if (bf->bf_state.bfs_dobaw) { 5319 ath_tx_update_baw(sc, an, atid, bf); 5320 bf->bf_state.bfs_dobaw = 0; 5321 if (!bf->bf_state.bfs_addedbaw) 5322 DPRINTF(sc, ATH_DEBUG_SW_TX, 5323 "%s: wasn't added: seqno %d\n", 5324 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5325 } 5326 5327 /* 5328 * If the queue is filtered, re-schedule as required. 5329 * 5330 * This is required as there may be a subsequent TX descriptor 5331 * for this end-node that has CLRDMASK set, so it's quite possible 5332 * that a filtered frame will be followed by a non-filtered 5333 * (complete or otherwise) frame. 5334 * 5335 * XXX should we do this before we complete the frame? 5336 */ 5337 if (atid->isfiltered) 5338 ath_tx_tid_filt_comp_complete(sc, atid); 5339 5340 /* 5341 * Send BAR if required 5342 */ 5343 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5344 ath_tx_tid_bar_tx(sc, atid); 5345 5346 ATH_TX_UNLOCK(sc); 5347 5348 ath_tx_default_comp(sc, bf, fail); 5349 /* bf is freed at this point */ 5350 } 5351 5352 void 5353 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 5354 { 5355 if (bf->bf_state.bfs_aggr) 5356 ath_tx_aggr_comp_aggr(sc, bf, fail); 5357 else 5358 ath_tx_aggr_comp_unaggr(sc, bf, fail); 5359 } 5360 5361 /* 5362 * Schedule some packets from the given node/TID to the hardware. 5363 * 5364 * This is the aggregate version. 5365 */ 5366 void 5367 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 5368 struct ath_tid *tid) 5369 { 5370 struct ath_buf *bf; 5371 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5372 struct ieee80211_tx_ampdu *tap; 5373 ATH_AGGR_STATUS status; 5374 ath_bufhead bf_q; 5375 5376 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 5377 ATH_TX_LOCK_ASSERT(sc); 5378 5379 /* 5380 * XXX TODO: If we're called for a queue that we're leaking frames to, 5381 * ensure we only leak one. 5382 */ 5383 5384 tap = ath_tx_get_tx_tid(an, tid->tid); 5385 5386 if (tid->tid == IEEE80211_NONQOS_TID) 5387 DPRINTF(sc, ATH_DEBUG_SW_TX, 5388 "%s: called for TID=NONQOS_TID?\n", __func__); 5389 5390 for (;;) { 5391 status = ATH_AGGR_DONE; 5392 5393 /* 5394 * If the upper layer has paused the TID, don't 5395 * queue any further packets. 5396 * 5397 * This can also occur from the completion task because 5398 * of packet loss; but as its serialised with this code, 5399 * it won't "appear" half way through queuing packets. 5400 */ 5401 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5402 break; 5403 5404 bf = ATH_TID_FIRST(tid); 5405 if (bf == NULL) { 5406 break; 5407 } 5408 5409 /* 5410 * If the packet doesn't fall within the BAW (eg a NULL 5411 * data frame), schedule it directly; continue. 5412 */ 5413 if (! bf->bf_state.bfs_dobaw) { 5414 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5415 "%s: non-baw packet\n", 5416 __func__); 5417 ATH_TID_REMOVE(tid, bf, bf_list); 5418 5419 if (bf->bf_state.bfs_nframes > 1) 5420 DPRINTF(sc, ATH_DEBUG_SW_TX, 5421 "%s: aggr=%d, nframes=%d\n", 5422 __func__, 5423 bf->bf_state.bfs_aggr, 5424 bf->bf_state.bfs_nframes); 5425 5426 /* 5427 * This shouldn't happen - such frames shouldn't 5428 * ever have been queued as an aggregate in the 5429 * first place. However, make sure the fields 5430 * are correctly setup just to be totally sure. 5431 */ 5432 bf->bf_state.bfs_aggr = 0; 5433 bf->bf_state.bfs_nframes = 1; 5434 5435 /* Update CLRDMASK just before this frame is queued */ 5436 ath_tx_update_clrdmask(sc, tid, bf); 5437 5438 ath_tx_do_ratelookup(sc, bf, tid->tid, false); 5439 ath_tx_calc_duration(sc, bf); 5440 ath_tx_calc_protection(sc, bf); 5441 ath_tx_set_rtscts(sc, bf); 5442 ath_tx_rate_fill_rcflags(sc, bf); 5443 ath_tx_setds(sc, bf); 5444 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5445 5446 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 5447 5448 /* Queue the packet; continue */ 5449 goto queuepkt; 5450 } 5451 5452 TAILQ_INIT(&bf_q); 5453 5454 /* 5455 * Do a rate control lookup on the first frame in the 5456 * list. The rate control code needs that to occur 5457 * before it can determine whether to TX. 5458 * It's inaccurate because the rate control code doesn't 5459 * really "do" aggregate lookups, so it only considers 5460 * the size of the first frame. 5461 */ 5462 ath_tx_do_ratelookup(sc, bf, tid->tid, true); 5463 bf->bf_state.bfs_rc[3].rix = 0; 5464 bf->bf_state.bfs_rc[3].tries = 0; 5465 5466 ath_tx_calc_duration(sc, bf); 5467 ath_tx_calc_protection(sc, bf); 5468 5469 ath_tx_set_rtscts(sc, bf); 5470 ath_tx_rate_fill_rcflags(sc, bf); 5471 5472 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 5473 5474 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5475 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 5476 5477 /* 5478 * No frames to be picked up - out of BAW 5479 */ 5480 if (TAILQ_EMPTY(&bf_q)) 5481 break; 5482 5483 /* 5484 * This assumes that the descriptor list in the ath_bufhead 5485 * are already linked together via bf_next pointers. 5486 */ 5487 bf = TAILQ_FIRST(&bf_q); 5488 5489 if (status == ATH_AGGR_8K_LIMITED) 5490 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 5491 5492 /* 5493 * If it's the only frame send as non-aggregate 5494 * assume that ath_tx_form_aggr() has checked 5495 * whether it's in the BAW and added it appropriately. 5496 */ 5497 if (bf->bf_state.bfs_nframes == 1) { 5498 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5499 "%s: single-frame aggregate\n", __func__); 5500 5501 /* Update CLRDMASK just before this frame is queued */ 5502 ath_tx_update_clrdmask(sc, tid, bf); 5503 5504 bf->bf_state.bfs_aggr = 0; 5505 bf->bf_state.bfs_ndelim = 0; 5506 ath_tx_setds(sc, bf); 5507 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5508 if (status == ATH_AGGR_BAW_CLOSED) 5509 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 5510 else 5511 sc->sc_aggr_stats.aggr_single_pkt++; 5512 } else { 5513 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5514 "%s: multi-frame aggregate: %d frames, " 5515 "length %d\n", 5516 __func__, bf->bf_state.bfs_nframes, 5517 bf->bf_state.bfs_al); 5518 bf->bf_state.bfs_aggr = 1; 5519 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 5520 sc->sc_aggr_stats.aggr_aggr_pkt++; 5521 5522 /* Update CLRDMASK just before this frame is queued */ 5523 ath_tx_update_clrdmask(sc, tid, bf); 5524 5525 /* 5526 * Calculate the duration/protection as required. 5527 */ 5528 ath_tx_calc_duration(sc, bf); 5529 ath_tx_calc_protection(sc, bf); 5530 5531 /* 5532 * Update the rate and rtscts information based on the 5533 * rate decision made by the rate control code; 5534 * the first frame in the aggregate needs it. 5535 */ 5536 ath_tx_set_rtscts(sc, bf); 5537 5538 /* 5539 * Setup the relevant descriptor fields 5540 * for aggregation. The first descriptor 5541 * already points to the rest in the chain. 5542 */ 5543 ath_tx_setds_11n(sc, bf); 5544 5545 } 5546 queuepkt: 5547 /* Set completion handler, multi-frame aggregate or not */ 5548 bf->bf_comp = ath_tx_aggr_comp; 5549 5550 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 5551 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); 5552 5553 /* 5554 * Update leak count and frame config if were leaking frames. 5555 * 5556 * XXX TODO: it should update all frames in an aggregate 5557 * correctly! 5558 */ 5559 ath_tx_leak_count_update(sc, tid, bf); 5560 5561 /* Punt to txq */ 5562 ath_tx_handoff(sc, txq, bf); 5563 5564 /* Track outstanding buffer count to hardware */ 5565 /* aggregates are "one" buffer */ 5566 tid->hwq_depth++; 5567 5568 /* 5569 * Break out if ath_tx_form_aggr() indicated 5570 * there can't be any further progress (eg BAW is full.) 5571 * Checking for an empty txq is done above. 5572 * 5573 * XXX locking on txq here? 5574 */ 5575 /* XXX TXQ locking */ 5576 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || 5577 (status == ATH_AGGR_BAW_CLOSED || 5578 status == ATH_AGGR_LEAK_CLOSED)) 5579 break; 5580 } 5581 } 5582 5583 /* 5584 * Schedule some packets from the given node/TID to the hardware. 5585 * 5586 * XXX TODO: this routine doesn't enforce the maximum TXQ depth. 5587 * It just dumps frames into the TXQ. We should limit how deep 5588 * the transmit queue can grow for frames dispatched to the given 5589 * TXQ. 5590 * 5591 * To avoid locking issues, either we need to own the TXQ lock 5592 * at this point, or we need to pass in the maximum frame count 5593 * from the caller. 5594 */ 5595 void 5596 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 5597 struct ath_tid *tid) 5598 { 5599 struct ath_buf *bf; 5600 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5601 5602 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 5603 __func__, an, tid->tid); 5604 5605 ATH_TX_LOCK_ASSERT(sc); 5606 5607 /* Check - is AMPDU pending or running? then print out something */ 5608 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 5609 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", 5610 __func__, tid->tid); 5611 if (ath_tx_ampdu_running(sc, an, tid->tid)) 5612 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", 5613 __func__, tid->tid); 5614 5615 for (;;) { 5616 5617 /* 5618 * If the upper layers have paused the TID, don't 5619 * queue any further packets. 5620 * 5621 * XXX if we are leaking frames, make sure we decrement 5622 * that counter _and_ we continue here. 5623 */ 5624 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5625 break; 5626 5627 bf = ATH_TID_FIRST(tid); 5628 if (bf == NULL) { 5629 break; 5630 } 5631 5632 ATH_TID_REMOVE(tid, bf, bf_list); 5633 5634 /* Sanity check! */ 5635 if (tid->tid != bf->bf_state.bfs_tid) { 5636 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" 5637 " tid %d\n", __func__, bf->bf_state.bfs_tid, 5638 tid->tid); 5639 } 5640 /* Normal completion handler */ 5641 bf->bf_comp = ath_tx_normal_comp; 5642 5643 /* 5644 * Override this for now, until the non-aggregate 5645 * completion handler correctly handles software retransmits. 5646 */ 5647 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 5648 5649 /* Update CLRDMASK just before this frame is queued */ 5650 ath_tx_update_clrdmask(sc, tid, bf); 5651 5652 /* Program descriptors + rate control */ 5653 ath_tx_do_ratelookup(sc, bf, tid->tid, false); 5654 ath_tx_calc_duration(sc, bf); 5655 ath_tx_calc_protection(sc, bf); 5656 ath_tx_set_rtscts(sc, bf); 5657 ath_tx_rate_fill_rcflags(sc, bf); 5658 ath_tx_setds(sc, bf); 5659 5660 /* 5661 * Update the current leak count if 5662 * we're leaking frames; and set the 5663 * MORE flag as appropriate. 5664 */ 5665 ath_tx_leak_count_update(sc, tid, bf); 5666 5667 /* Track outstanding buffer count to hardware */ 5668 /* aggregates are "one" buffer */ 5669 tid->hwq_depth++; 5670 5671 /* Punt to hardware or software txq */ 5672 ath_tx_handoff(sc, txq, bf); 5673 } 5674 } 5675 5676 /* 5677 * Schedule some packets to the given hardware queue. 5678 * 5679 * This function walks the list of TIDs (ie, ath_node TIDs 5680 * with queued traffic) and attempts to schedule traffic 5681 * from them. 5682 * 5683 * TID scheduling is implemented as a FIFO, with TIDs being 5684 * added to the end of the queue after some frames have been 5685 * scheduled. 5686 */ 5687 void 5688 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) 5689 { 5690 struct ath_tid *tid, *next, *last; 5691 5692 ATH_TX_LOCK_ASSERT(sc); 5693 5694 /* 5695 * For non-EDMA chips, aggr frames that have been built are 5696 * in axq_aggr_depth, whether they've been scheduled or not. 5697 * There's no FIFO, so txq->axq_depth is what's been scheduled 5698 * to the hardware. 5699 * 5700 * For EDMA chips, we do it in two stages. The existing code 5701 * builds a list of frames to go to the hardware and the EDMA 5702 * code turns it into a single entry to push into the FIFO. 5703 * That way we don't take up one packet per FIFO slot. 5704 * We do push one aggregate per FIFO slot though, just to keep 5705 * things simple. 5706 * 5707 * The FIFO depth is what's in the hardware; the txq->axq_depth 5708 * is what's been scheduled to the FIFO. 5709 * 5710 * fifo.axq_depth is the number of frames (or aggregates) pushed 5711 * into the EDMA FIFO. For multi-frame lists, this is the number 5712 * of frames pushed in. 5713 * axq_fifo_depth is the number of FIFO slots currently busy. 5714 */ 5715 5716 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */ 5717 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) { 5718 sc->sc_aggr_stats.aggr_sched_nopkt++; 5719 return; 5720 } 5721 5722 /* 5723 * For non-EDMA chips, axq_depth is the "what's scheduled to 5724 * the hardware list". For EDMA it's "What's built for the hardware" 5725 * and fifo.axq_depth is how many frames have been dispatched 5726 * already to the hardware. 5727 */ 5728 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) { 5729 sc->sc_aggr_stats.aggr_sched_nopkt++; 5730 return; 5731 } 5732 5733 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); 5734 5735 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { 5736 /* 5737 * Suspend paused queues here; they'll be resumed 5738 * once the addba completes or times out. 5739 */ 5740 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", 5741 __func__, tid->tid, tid->paused); 5742 ath_tx_tid_unsched(sc, tid); 5743 /* 5744 * This node may be in power-save and we're leaking 5745 * a frame; be careful. 5746 */ 5747 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 5748 goto loop_done; 5749 } 5750 if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) 5751 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); 5752 else 5753 ath_tx_tid_hw_queue_norm(sc, tid->an, tid); 5754 5755 /* Not empty? Re-schedule */ 5756 if (tid->axq_depth != 0) 5757 ath_tx_tid_sched(sc, tid); 5758 5759 /* 5760 * Give the software queue time to aggregate more 5761 * packets. If we aren't running aggregation then 5762 * we should still limit the hardware queue depth. 5763 */ 5764 /* XXX TXQ locking */ 5765 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { 5766 break; 5767 } 5768 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { 5769 break; 5770 } 5771 loop_done: 5772 /* 5773 * If this was the last entry on the original list, stop. 5774 * Otherwise nodes that have been rescheduled onto the end 5775 * of the TID FIFO list will just keep being rescheduled. 5776 * 5777 * XXX What should we do about nodes that were paused 5778 * but are pending a leaking frame in response to a ps-poll? 5779 * They'll be put at the front of the list; so they'll 5780 * prematurely trigger this condition! Ew. 5781 */ 5782 if (tid == last) 5783 break; 5784 } 5785 } 5786 5787 /* 5788 * TX addba handling 5789 */ 5790 5791 /* 5792 * Return net80211 TID struct pointer, or NULL for none 5793 */ 5794 struct ieee80211_tx_ampdu * 5795 ath_tx_get_tx_tid(struct ath_node *an, int tid) 5796 { 5797 struct ieee80211_node *ni = &an->an_node; 5798 struct ieee80211_tx_ampdu *tap; 5799 5800 if (tid == IEEE80211_NONQOS_TID) 5801 return NULL; 5802 5803 tap = &ni->ni_tx_ampdu[tid]; 5804 return tap; 5805 } 5806 5807 /* 5808 * Is AMPDU-TX running? 5809 */ 5810 static int 5811 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5812 { 5813 struct ieee80211_tx_ampdu *tap; 5814 5815 if (tid == IEEE80211_NONQOS_TID) 5816 return 0; 5817 5818 tap = ath_tx_get_tx_tid(an, tid); 5819 if (tap == NULL) 5820 return 0; /* Not valid; default to not running */ 5821 5822 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5823 } 5824 5825 /* 5826 * Is AMPDU-TX negotiation pending? 5827 */ 5828 static int 5829 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5830 { 5831 struct ieee80211_tx_ampdu *tap; 5832 5833 if (tid == IEEE80211_NONQOS_TID) 5834 return 0; 5835 5836 tap = ath_tx_get_tx_tid(an, tid); 5837 if (tap == NULL) 5838 return 0; /* Not valid; default to not pending */ 5839 5840 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5841 } 5842 5843 /* 5844 * Is AMPDU-TX pending for the given TID? 5845 */ 5846 5847 5848 /* 5849 * Method to handle sending an ADDBA request. 5850 * 5851 * We tap this so the relevant flags can be set to pause the TID 5852 * whilst waiting for the response. 5853 * 5854 * XXX there's no timeout handler we can override? 5855 */ 5856 int 5857 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5858 int dialogtoken, int baparamset, int batimeout) 5859 { 5860 struct ath_softc *sc = ni->ni_ic->ic_softc; 5861 int tid = tap->txa_tid; 5862 struct ath_node *an = ATH_NODE(ni); 5863 struct ath_tid *atid = &an->an_tid[tid]; 5864 5865 /* 5866 * XXX danger Will Robinson! 5867 * 5868 * Although the taskqueue may be running and scheduling some more 5869 * packets, these should all be _before_ the addba sequence number. 5870 * However, net80211 will keep self-assigning sequence numbers 5871 * until addba has been negotiated. 5872 * 5873 * In the past, these packets would be "paused" (which still works 5874 * fine, as they're being scheduled to the driver in the same 5875 * serialised method which is calling the addba request routine) 5876 * and when the aggregation session begins, they'll be dequeued 5877 * as aggregate packets and added to the BAW. However, now there's 5878 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5879 * packets. Thus they never get included in the BAW tracking and 5880 * this can cause the initial burst of packets after the addba 5881 * negotiation to "hang", as they quickly fall outside the BAW. 5882 * 5883 * The "eventual" solution should be to tag these packets with 5884 * dobaw. Although net80211 has given us a sequence number, 5885 * it'll be "after" the left edge of the BAW and thus it'll 5886 * fall within it. 5887 */ 5888 ATH_TX_LOCK(sc); 5889 /* 5890 * This is a bit annoying. Until net80211 HT code inherits some 5891 * (any) locking, we may have this called in parallel BUT only 5892 * one response/timeout will be called. Grr. 5893 */ 5894 if (atid->addba_tx_pending == 0) { 5895 ath_tx_tid_pause(sc, atid); 5896 atid->addba_tx_pending = 1; 5897 } 5898 ATH_TX_UNLOCK(sc); 5899 5900 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5901 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5902 __func__, 5903 ni->ni_macaddr, 5904 ":", 5905 dialogtoken, baparamset, batimeout); 5906 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5907 "%s: txa_start=%d, ni_txseqs=%d\n", 5908 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5909 5910 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5911 batimeout); 5912 } 5913 5914 /* 5915 * Handle an ADDBA response. 5916 * 5917 * We unpause the queue so TX'ing can resume. 5918 * 5919 * Any packets TX'ed from this point should be "aggregate" (whether 5920 * aggregate or not) so the BAW is updated. 5921 * 5922 * Note! net80211 keeps self-assigning sequence numbers until 5923 * ampdu is negotiated. This means the initially-negotiated BAW left 5924 * edge won't match the ni->ni_txseq. 5925 * 5926 * So, being very dirty, the BAW left edge is "slid" here to match 5927 * ni->ni_txseq. 5928 * 5929 * What likely SHOULD happen is that all packets subsequent to the 5930 * addba request should be tagged as aggregate and queued as non-aggregate 5931 * frames; thus updating the BAW. For now though, I'll just slide the 5932 * window. 5933 */ 5934 int 5935 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5936 int status, int code, int batimeout) 5937 { 5938 struct ath_softc *sc = ni->ni_ic->ic_softc; 5939 int tid = tap->txa_tid; 5940 struct ath_node *an = ATH_NODE(ni); 5941 struct ath_tid *atid = &an->an_tid[tid]; 5942 int r; 5943 5944 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5945 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, 5946 ni->ni_macaddr, 5947 ":", 5948 status, code, batimeout); 5949 5950 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5951 "%s: txa_start=%d, ni_txseqs=%d\n", 5952 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5953 5954 /* 5955 * Call this first, so the interface flags get updated 5956 * before the TID is unpaused. Otherwise a race condition 5957 * exists where the unpaused TID still doesn't yet have 5958 * IEEE80211_AGGR_RUNNING set. 5959 */ 5960 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 5961 5962 ATH_TX_LOCK(sc); 5963 atid->addba_tx_pending = 0; 5964 /* 5965 * XXX dirty! 5966 * Slide the BAW left edge to wherever net80211 left it for us. 5967 * Read above for more information. 5968 */ 5969 tap->txa_start = ni->ni_txseqs[tid]; 5970 ath_tx_tid_resume(sc, atid); 5971 ATH_TX_UNLOCK(sc); 5972 return r; 5973 } 5974 5975 5976 /* 5977 * Stop ADDBA on a queue. 5978 * 5979 * This can be called whilst BAR TX is currently active on the queue, 5980 * so make sure this is unblocked before continuing. 5981 */ 5982 void 5983 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 5984 { 5985 struct ath_softc *sc = ni->ni_ic->ic_softc; 5986 int tid = tap->txa_tid; 5987 struct ath_node *an = ATH_NODE(ni); 5988 struct ath_tid *atid = &an->an_tid[tid]; 5989 ath_bufhead bf_cq; 5990 struct ath_buf *bf; 5991 5992 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", 5993 __func__, 5994 ni->ni_macaddr, 5995 ":"); 5996 5997 /* 5998 * Pause TID traffic early, so there aren't any races 5999 * Unblock the pending BAR held traffic, if it's currently paused. 6000 */ 6001 ATH_TX_LOCK(sc); 6002 ath_tx_tid_pause(sc, atid); 6003 if (atid->bar_wait) { 6004 /* 6005 * bar_unsuspend() expects bar_tx == 1, as it should be 6006 * called from the TX completion path. This quietens 6007 * the warning. It's cleared for us anyway. 6008 */ 6009 atid->bar_tx = 1; 6010 ath_tx_tid_bar_unsuspend(sc, atid); 6011 } 6012 ATH_TX_UNLOCK(sc); 6013 6014 /* There's no need to hold the TXQ lock here */ 6015 sc->sc_addba_stop(ni, tap); 6016 6017 /* 6018 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 6019 * it'll set the cleanup flag, and it'll be unpaused once 6020 * things have been cleaned up. 6021 */ 6022 TAILQ_INIT(&bf_cq); 6023 ATH_TX_LOCK(sc); 6024 6025 /* 6026 * In case there's a followup call to this, only call it 6027 * if we don't have a cleanup in progress. 6028 * 6029 * Since we've paused the queue above, we need to make 6030 * sure we unpause if there's already a cleanup in 6031 * progress - it means something else is also doing 6032 * this stuff, so we don't need to also keep it paused. 6033 */ 6034 if (atid->cleanup_inprogress) { 6035 ath_tx_tid_resume(sc, atid); 6036 } else { 6037 ath_tx_tid_cleanup(sc, an, tid, &bf_cq); 6038 /* 6039 * Unpause the TID if no cleanup is required. 6040 */ 6041 if (! atid->cleanup_inprogress) 6042 ath_tx_tid_resume(sc, atid); 6043 } 6044 ATH_TX_UNLOCK(sc); 6045 6046 /* Handle completing frames and fail them */ 6047 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 6048 TAILQ_REMOVE(&bf_cq, bf, bf_list); 6049 ath_tx_default_comp(sc, bf, 1); 6050 } 6051 6052 } 6053 6054 /* 6055 * Handle a node reassociation. 6056 * 6057 * We may have a bunch of frames queued to the hardware; those need 6058 * to be marked as cleanup. 6059 */ 6060 void 6061 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) 6062 { 6063 struct ath_tid *tid; 6064 int i; 6065 ath_bufhead bf_cq; 6066 struct ath_buf *bf; 6067 6068 TAILQ_INIT(&bf_cq); 6069 6070 ATH_TX_UNLOCK_ASSERT(sc); 6071 6072 ATH_TX_LOCK(sc); 6073 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 6074 tid = &an->an_tid[i]; 6075 if (tid->hwq_depth == 0) 6076 continue; 6077 DPRINTF(sc, ATH_DEBUG_NODE, 6078 "%s: %6D: TID %d: cleaning up TID\n", 6079 __func__, 6080 an->an_node.ni_macaddr, 6081 ":", 6082 i); 6083 /* 6084 * In case there's a followup call to this, only call it 6085 * if we don't have a cleanup in progress. 6086 */ 6087 if (! tid->cleanup_inprogress) { 6088 ath_tx_tid_pause(sc, tid); 6089 ath_tx_tid_cleanup(sc, an, i, &bf_cq); 6090 /* 6091 * Unpause the TID if no cleanup is required. 6092 */ 6093 if (! tid->cleanup_inprogress) 6094 ath_tx_tid_resume(sc, tid); 6095 } 6096 } 6097 ATH_TX_UNLOCK(sc); 6098 6099 /* Handle completing frames and fail them */ 6100 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 6101 TAILQ_REMOVE(&bf_cq, bf, bf_list); 6102 ath_tx_default_comp(sc, bf, 1); 6103 } 6104 } 6105 6106 /* 6107 * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 6108 * it simply tears down the aggregation session. Ew. 6109 * 6110 * It however will call ieee80211_ampdu_stop() which will call 6111 * ic->ic_addba_stop(). 6112 * 6113 * XXX This uses a hard-coded max BAR count value; the whole 6114 * XXX BAR TX success or failure should be better handled! 6115 */ 6116 void 6117 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6118 int status) 6119 { 6120 struct ath_softc *sc = ni->ni_ic->ic_softc; 6121 int tid = tap->txa_tid; 6122 struct ath_node *an = ATH_NODE(ni); 6123 struct ath_tid *atid = &an->an_tid[tid]; 6124 int attempts = tap->txa_attempts; 6125 int old_txa_start; 6126 6127 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 6128 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n", 6129 __func__, 6130 ni->ni_macaddr, 6131 ":", 6132 tap->txa_tid, 6133 atid->tid, 6134 status, 6135 attempts, 6136 tap->txa_start, 6137 tap->txa_seqpending); 6138 6139 /* Note: This may update the BAW details */ 6140 /* 6141 * XXX What if this does slide the BAW along? We need to somehow 6142 * XXX either fix things when it does happen, or prevent the 6143 * XXX seqpending value to be anything other than exactly what 6144 * XXX the hell we want! 6145 * 6146 * XXX So for now, how I do this inside the TX lock for now 6147 * XXX and just correct it afterwards? The below condition should 6148 * XXX never happen and if it does I need to fix all kinds of things. 6149 */ 6150 ATH_TX_LOCK(sc); 6151 old_txa_start = tap->txa_start; 6152 sc->sc_bar_response(ni, tap, status); 6153 if (tap->txa_start != old_txa_start) { 6154 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n", 6155 __func__, 6156 tid, 6157 tap->txa_start, 6158 old_txa_start); 6159 } 6160 tap->txa_start = old_txa_start; 6161 ATH_TX_UNLOCK(sc); 6162 6163 /* Unpause the TID */ 6164 /* 6165 * XXX if this is attempt=50, the TID will be downgraded 6166 * XXX to a non-aggregate session. So we must unpause the 6167 * XXX TID here or it'll never be done. 6168 * 6169 * Also, don't call it if bar_tx/bar_wait are 0; something 6170 * has beaten us to the punch? (XXX figure out what?) 6171 */ 6172 if (status == 0 || attempts == 50) { 6173 ATH_TX_LOCK(sc); 6174 if (atid->bar_tx == 0 || atid->bar_wait == 0) 6175 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 6176 "%s: huh? bar_tx=%d, bar_wait=%d\n", 6177 __func__, 6178 atid->bar_tx, atid->bar_wait); 6179 else 6180 ath_tx_tid_bar_unsuspend(sc, atid); 6181 ATH_TX_UNLOCK(sc); 6182 } 6183 } 6184 6185 /* 6186 * This is called whenever the pending ADDBA request times out. 6187 * Unpause and reschedule the TID. 6188 */ 6189 void 6190 ath_addba_response_timeout(struct ieee80211_node *ni, 6191 struct ieee80211_tx_ampdu *tap) 6192 { 6193 struct ath_softc *sc = ni->ni_ic->ic_softc; 6194 int tid = tap->txa_tid; 6195 struct ath_node *an = ATH_NODE(ni); 6196 struct ath_tid *atid = &an->an_tid[tid]; 6197 6198 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 6199 "%s: %6D: TID=%d, called; resuming\n", 6200 __func__, 6201 ni->ni_macaddr, 6202 ":", 6203 tid); 6204 6205 ATH_TX_LOCK(sc); 6206 atid->addba_tx_pending = 0; 6207 ATH_TX_UNLOCK(sc); 6208 6209 /* Note: This updates the aggregate state to (again) pending */ 6210 sc->sc_addba_response_timeout(ni, tap); 6211 6212 /* Unpause the TID; which reschedules it */ 6213 ATH_TX_LOCK(sc); 6214 ath_tx_tid_resume(sc, atid); 6215 ATH_TX_UNLOCK(sc); 6216 } 6217 6218 /* 6219 * Check if a node is asleep or not. 6220 */ 6221 int 6222 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) 6223 { 6224 6225 ATH_TX_LOCK_ASSERT(sc); 6226 6227 return (an->an_is_powersave); 6228 } 6229 6230 /* 6231 * Mark a node as currently "in powersaving." 6232 * This suspends all traffic on the node. 6233 * 6234 * This must be called with the node/tx locks free. 6235 * 6236 * XXX TODO: the locking silliness below is due to how the node 6237 * locking currently works. Right now, the node lock is grabbed 6238 * to do rate control lookups and these are done with the TX 6239 * queue lock held. This means the node lock can't be grabbed 6240 * first here or a LOR will occur. 6241 * 6242 * Eventually (hopefully!) the TX path code will only grab 6243 * the TXQ lock when transmitting and the ath_node lock when 6244 * doing node/TID operations. There are other complications - 6245 * the sched/unsched operations involve walking the per-txq 6246 * 'active tid' list and this requires both locks to be held. 6247 */ 6248 void 6249 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) 6250 { 6251 struct ath_tid *atid; 6252 struct ath_txq *txq; 6253 int tid; 6254 6255 ATH_TX_UNLOCK_ASSERT(sc); 6256 6257 /* Suspend all traffic on the node */ 6258 ATH_TX_LOCK(sc); 6259 6260 if (an->an_is_powersave) { 6261 DPRINTF(sc, ATH_DEBUG_XMIT, 6262 "%s: %6D: node was already asleep!\n", 6263 __func__, an->an_node.ni_macaddr, ":"); 6264 ATH_TX_UNLOCK(sc); 6265 return; 6266 } 6267 6268 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 6269 atid = &an->an_tid[tid]; 6270 txq = sc->sc_ac2q[atid->ac]; 6271 6272 ath_tx_tid_pause(sc, atid); 6273 } 6274 6275 /* Mark node as in powersaving */ 6276 an->an_is_powersave = 1; 6277 6278 ATH_TX_UNLOCK(sc); 6279 } 6280 6281 /* 6282 * Mark a node as currently "awake." 6283 * This resumes all traffic to the node. 6284 */ 6285 void 6286 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) 6287 { 6288 struct ath_tid *atid; 6289 struct ath_txq *txq; 6290 int tid; 6291 6292 ATH_TX_UNLOCK_ASSERT(sc); 6293 6294 ATH_TX_LOCK(sc); 6295 6296 /* !? */ 6297 if (an->an_is_powersave == 0) { 6298 ATH_TX_UNLOCK(sc); 6299 DPRINTF(sc, ATH_DEBUG_XMIT, 6300 "%s: an=%p: node was already awake\n", 6301 __func__, an); 6302 return; 6303 } 6304 6305 /* Mark node as awake */ 6306 an->an_is_powersave = 0; 6307 /* 6308 * Clear any pending leaked frame requests 6309 */ 6310 an->an_leak_count = 0; 6311 6312 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 6313 atid = &an->an_tid[tid]; 6314 txq = sc->sc_ac2q[atid->ac]; 6315 6316 ath_tx_tid_resume(sc, atid); 6317 } 6318 ATH_TX_UNLOCK(sc); 6319 } 6320 6321 static int 6322 ath_legacy_dma_txsetup(struct ath_softc *sc) 6323 { 6324 6325 /* nothing new needed */ 6326 return (0); 6327 } 6328 6329 static int 6330 ath_legacy_dma_txteardown(struct ath_softc *sc) 6331 { 6332 6333 /* nothing new needed */ 6334 return (0); 6335 } 6336 6337 void 6338 ath_xmit_setup_legacy(struct ath_softc *sc) 6339 { 6340 /* 6341 * For now, just set the descriptor length to sizeof(ath_desc); 6342 * worry about extracting the real length out of the HAL later. 6343 */ 6344 sc->sc_tx_desclen = sizeof(struct ath_desc); 6345 sc->sc_tx_statuslen = sizeof(struct ath_desc); 6346 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ 6347 6348 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; 6349 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; 6350 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; 6351 6352 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; 6353 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; 6354 6355 sc->sc_tx.xmit_drain = ath_legacy_tx_drain; 6356 } 6357