/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
        struct ath_buf *bf;
        int i, n;
        const char *ds;

        /* XXX we should skip out early if debugging isn't enabled! */
        bf = bf_first;

        while (bf != NULL) {
                /* XXX should ensure bf_nseg > 0! */
                if (bf->bf_nseg == 0)
                        break;
                n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
                for (i = 0, ds = (const char *) bf->bf_desc;
                    i < n;
                    i++, ds += sc->sc_tx_desclen) {
                        if_ath_alq_post(&sc->sc_alq,
                            ATH_ALQ_EDMA_TXDESC,
                            sc->sc_tx_desclen,
                            ds);
                }
                bf = bf->bf_next;
        }
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
        return ((sc->sc_ah->ah_magic == 0x20065416) ||
            (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames get mapped to a TID so frames consistently
 * go on a sensible queue.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
        const struct ieee80211_frame *wh;

        wh = mtod(m0, const struct ieee80211_frame *);

        /* Non-QoS: map frame to a TID queue for software queueing */
        if (! IEEE80211_QOS_HAS_SEQ(wh))
                return (WME_AC_TO_TID(M_WME_GETAC(m0)));

        /* QoS - fetch the TID from the header, ignore mbuf WME */
        return (ieee80211_gettid(wh));
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ieee80211_frame *wh;

        wh = mtod(bf->bf_m, struct ieee80211_frame *);
        /* Only update/resync if needed */
        if (bf->bf_state.bfs_isretried == 0) {
                wh->i_fc[1] |= IEEE80211_FC1_RETRY;
                bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
                    BUS_DMASYNC_PREWRITE);
        }
        bf->bf_state.bfs_isretried = 1;
        bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * For QoS frames, obey the TID.  That way things like
 * management frames that are related to a given TID
 * are thus serialised with the rest of the TID traffic,
 * regardless of net80211 overriding priority.
 *
 * For non-QoS frames, return the mbuf WME priority.
 *
 * This has implications that higher priority non-QoS traffic
 * may end up being scheduled before other non-QoS traffic,
 * leading to out-of-sequence packets being emitted.
 *
 * (It'd be nice to log/count this so we can see if it
 * really is a problem.)
 *
 * TODO: maybe we should throw multicast traffic, QoS or
 * otherwise, into a separate TX queue?
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
        const struct ieee80211_frame *wh;

        wh = mtod(m0, const struct ieee80211_frame *);

        /*
         * QoS data frame (sequence number or otherwise) -
         * return hardware queue mapping for the underlying
         * TID.
         */
        if (IEEE80211_QOS_HAS_SEQ(wh))
                return TID_TO_WME_AC(ieee80211_gettid(wh));

        /*
         * Otherwise - return mbuf QoS pri.
         */
        return (M_WME_GETAC(m0));
}
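
#if 0
/*
 * Illustrative sketch (not compiled in): the AC/TID mapping used by
 * ath_tx_gettid() and ath_tx_getac() above round-trips, so a non-QoS
 * frame classified to a given AC is software queued and dispatched on
 * that same AC.  WME_AC_BE..WME_AC_VO and the mapping macros come
 * from headers already included here.
 */
static void
ath_tx_actid_mapping_example(void)
{
        int ac;

        for (ac = WME_AC_BE; ac <= WME_AC_VO; ac++)
                KASSERT(TID_TO_WME_AC(WME_AC_TO_TID(ac)) == ac,
                    ("AC %d does not round-trip via its TID", ac));
}
#endif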
275 */ 276 int 277 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 278 struct mbuf *m0, struct ieee80211_node *ni) 279 { 280 struct mbuf *m; 281 struct ath_buf *bf; 282 283 ATH_TXBUF_LOCK(sc); 284 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 285 /* XXX non-management? */ 286 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 287 if (bf == NULL) { /* out of buffers, cleanup */ 288 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", 289 __func__); 290 ath_txfrag_cleanup(sc, frags, ni); 291 break; 292 } 293 ieee80211_node_incref(ni); 294 TAILQ_INSERT_TAIL(frags, bf, bf_list); 295 } 296 ATH_TXBUF_UNLOCK(sc); 297 298 return !TAILQ_EMPTY(frags); 299 } 300 301 static int 302 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 303 { 304 struct mbuf *m; 305 int error; 306 307 /* 308 * Load the DMA map so any coalescing is done. This 309 * also calculates the number of descriptors we need. 310 */ 311 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 312 bf->bf_segs, &bf->bf_nseg, 313 BUS_DMA_NOWAIT); 314 if (error == EFBIG) { 315 /* XXX packet requires too many descriptors */ 316 bf->bf_nseg = ATH_MAX_SCATTER + 1; 317 } else if (error != 0) { 318 sc->sc_stats.ast_tx_busdma++; 319 ieee80211_free_mbuf(m0); 320 return error; 321 } 322 /* 323 * Discard null packets and check for packets that 324 * require too many TX descriptors. We try to convert 325 * the latter to a cluster. 326 */ 327 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ 328 sc->sc_stats.ast_tx_linear++; 329 m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); 330 if (m == NULL) { 331 ieee80211_free_mbuf(m0); 332 sc->sc_stats.ast_tx_nombuf++; 333 return ENOMEM; 334 } 335 m0 = m; 336 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 337 bf->bf_segs, &bf->bf_nseg, 338 BUS_DMA_NOWAIT); 339 if (error != 0) { 340 sc->sc_stats.ast_tx_busdma++; 341 ieee80211_free_mbuf(m0); 342 return error; 343 } 344 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, 345 ("too many segments after defrag; nseg %u", bf->bf_nseg)); 346 } else if (bf->bf_nseg == 0) { /* null packet, discard */ 347 sc->sc_stats.ast_tx_nodata++; 348 ieee80211_free_mbuf(m0); 349 return EIO; 350 } 351 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 352 __func__, m0, m0->m_pkthdr.len); 353 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 354 bf->bf_m = m0; 355 356 return 0; 357 } 358 359 /* 360 * Chain together segments+descriptors for a frame - 11n or otherwise. 361 * 362 * For aggregates, this is called on each frame in the aggregate. 363 */ 364 static void 365 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, 366 struct ath_buf *bf, bool is_aggr, int is_first_subframe, 367 int is_last_subframe) 368 { 369 struct ath_hal *ah = sc->sc_ah; 370 char *ds; 371 int i, bp, dsp; 372 HAL_DMA_ADDR bufAddrList[4]; 373 uint32_t segLenList[4]; 374 int numTxMaps = 1; 375 int isFirstDesc = 1; 376 377 /* 378 * XXX There's txdma and txdma_mgmt; the descriptor 379 * sizes must match. 380 */ 381 struct ath_descdma *dd = &sc->sc_txdma; 382 383 /* 384 * Fillin the remainder of the descriptor info. 385 */ 386 387 /* 388 * We need the number of TX data pointers in each descriptor. 389 * EDMA and later chips support 4 TX buffers per descriptor; 390 * previous chips just support one. 391 */ 392 numTxMaps = sc->sc_tx_nmaps; 393 394 /* 395 * For EDMA and later chips ensure the TX map is fully populated 396 * before advancing to the next descriptor. 
397 */ 398 ds = (char *) bf->bf_desc; 399 bp = dsp = 0; 400 bzero(bufAddrList, sizeof(bufAddrList)); 401 bzero(segLenList, sizeof(segLenList)); 402 for (i = 0; i < bf->bf_nseg; i++) { 403 bufAddrList[bp] = bf->bf_segs[i].ds_addr; 404 segLenList[bp] = bf->bf_segs[i].ds_len; 405 bp++; 406 407 /* 408 * Go to the next segment if this isn't the last segment 409 * and there's space in the current TX map. 410 */ 411 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 412 continue; 413 414 /* 415 * Last segment or we're out of buffer pointers. 416 */ 417 bp = 0; 418 419 if (i == bf->bf_nseg - 1) 420 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 421 else 422 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 423 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 424 425 /* 426 * XXX This assumes that bfs_txq is the actual destination 427 * hardware queue at this point. It may not have been 428 * assigned, it may actually be pointing to the multicast 429 * software TXQ id. These must be fixed! 430 */ 431 ath_hal_filltxdesc(ah, (struct ath_desc *) ds 432 , bufAddrList 433 , segLenList 434 , bf->bf_descid /* XXX desc id */ 435 , bf->bf_state.bfs_tx_queue 436 , isFirstDesc /* first segment */ 437 , i == bf->bf_nseg - 1 /* last segment */ 438 , (struct ath_desc *) ds0 /* first descriptor */ 439 ); 440 441 /* 442 * Make sure the 11n aggregate fields are cleared. 443 * 444 * XXX TODO: this doesn't need to be called for 445 * aggregate frames; as it'll be called on all 446 * sub-frames. Since the descriptors are in 447 * non-cacheable memory, this leads to some 448 * rather slow writes on MIPS/ARM platforms. 449 */ 450 if (ath_tx_is_11n(sc)) 451 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 452 453 /* 454 * If 11n is enabled, set it up as if it's an aggregate 455 * frame. 456 */ 457 if (is_last_subframe) { 458 ath_hal_set11n_aggr_last(sc->sc_ah, 459 (struct ath_desc *) ds); 460 } else if (is_aggr) { 461 /* 462 * This clears the aggrlen field; so 463 * the caller needs to call set_aggr_first()! 464 * 465 * XXX TODO: don't call this for the first 466 * descriptor in the first frame in an 467 * aggregate! 468 */ 469 ath_hal_set11n_aggr_middle(sc->sc_ah, 470 (struct ath_desc *) ds, 471 bf->bf_state.bfs_ndelim); 472 } 473 isFirstDesc = 0; 474 bf->bf_lastds = (struct ath_desc *) ds; 475 476 /* 477 * Don't forget to skip to the next descriptor. 478 */ 479 ds += sc->sc_tx_desclen; 480 dsp++; 481 482 /* 483 * .. and don't forget to blank these out! 484 */ 485 bzero(bufAddrList, sizeof(bufAddrList)); 486 bzero(segLenList, sizeof(segLenList)); 487 } 488 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 489 } 490 491 /* 492 * Set the rate control fields in the given descriptor based on 493 * the bf_state fields and node state. 494 * 495 * The bfs fields should already be set with the relevant rate 496 * control information, including whether MRR is to be enabled. 497 * 498 * Since the FreeBSD HAL currently sets up the first TX rate 499 * in ath_hal_setuptxdesc(), this will setup the MRR 500 * conditionally for the pre-11n chips, and call ath_buf_set_rate 501 * unconditionally for 11n chips. These require the 11n rate 502 * scenario to be set if MCS rates are enabled, so it's easier 503 * to just always call it. The caller can then only set rates 2, 3 504 * and 4 if multi-rate retry is needed. 
505 */ 506 static void 507 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 508 struct ath_buf *bf) 509 { 510 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 511 512 /* If mrr is disabled, blank tries 1, 2, 3 */ 513 if (! bf->bf_state.bfs_ismrr) 514 rc[1].tries = rc[2].tries = rc[3].tries = 0; 515 516 #if 0 517 /* 518 * If NOACK is set, just set ntries=1. 519 */ 520 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 521 rc[1].tries = rc[2].tries = rc[3].tries = 0; 522 rc[0].tries = 1; 523 } 524 #endif 525 526 /* 527 * Always call - that way a retried descriptor will 528 * have the MRR fields overwritten. 529 * 530 * XXX TODO: see if this is really needed - setting up 531 * the first descriptor should set the MRR fields to 0 532 * for us anyway. 533 */ 534 if (ath_tx_is_11n(sc)) { 535 ath_buf_set_rate(sc, ni, bf); 536 } else { 537 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 538 , rc[1].ratecode, rc[1].tries 539 , rc[2].ratecode, rc[2].tries 540 , rc[3].ratecode, rc[3].tries 541 ); 542 } 543 } 544 545 /* 546 * Setup segments+descriptors for an 11n aggregate. 547 * bf_first is the first buffer in the aggregate. 548 * The descriptor list must already been linked together using 549 * bf->bf_next. 550 */ 551 static void 552 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 553 { 554 struct ath_buf *bf, *bf_prev = NULL; 555 struct ath_desc *ds0 = bf_first->bf_desc; 556 557 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 558 __func__, bf_first->bf_state.bfs_nframes, 559 bf_first->bf_state.bfs_al); 560 561 bf = bf_first; 562 563 if (bf->bf_state.bfs_txrate0 == 0) 564 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 565 __func__, bf, 0); 566 if (bf->bf_state.bfs_rc[0].ratecode == 0) 567 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 568 __func__, bf, 0); 569 570 /* 571 * Setup all descriptors of all subframes - this will 572 * call ath_hal_set11naggrmiddle() on every frame. 573 */ 574 while (bf != NULL) { 575 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 576 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 577 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 578 SEQNO(bf->bf_state.bfs_seqno)); 579 580 /* 581 * Setup the initial fields for the first descriptor - all 582 * the non-11n specific stuff. 583 */ 584 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 585 , bf->bf_state.bfs_pktlen /* packet length */ 586 , bf->bf_state.bfs_hdrlen /* header length */ 587 , bf->bf_state.bfs_atype /* Atheros packet type */ 588 , bf->bf_state.bfs_txpower /* txpower */ 589 , bf->bf_state.bfs_txrate0 590 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 591 , bf->bf_state.bfs_keyix /* key cache index */ 592 , bf->bf_state.bfs_txantenna /* antenna mode */ 593 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 594 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 595 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 596 ); 597 598 /* 599 * First descriptor? Setup the rate control and initial 600 * aggregate header information. 601 */ 602 if (bf == bf_first) { 603 /* 604 * setup first desc with rate and aggr info 605 */ 606 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 607 } 608 609 /* 610 * Setup the descriptors for a multi-descriptor frame. 611 * This is both aggregate and non-aggregate aware. 612 */ 613 ath_tx_chaindesclist(sc, ds0, bf, 614 1, /* is_aggr */ 615 !! (bf == bf_first), /* is_first_subframe */ 616 !! 

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
        struct ath_buf *bf, *bf_prev = NULL;
        struct ath_desc *ds0 = bf_first->bf_desc;

        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
            __func__, bf_first->bf_state.bfs_nframes,
            bf_first->bf_state.bfs_al);

        bf = bf_first;

        if (bf->bf_state.bfs_txrate0 == 0)
                DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
                    __func__, bf, 0);
        if (bf->bf_state.bfs_rc[0].ratecode == 0)
                DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
                    __func__, bf, 0);

        /*
         * Setup all descriptors of all subframes - this will
         * call ath_hal_set11n_aggr_middle() on every frame.
         */
        while (bf != NULL) {
                DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
                    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
                    SEQNO(bf->bf_state.bfs_seqno));

                /*
                 * Setup the initial fields for the first descriptor - all
                 * the non-11n specific stuff.
                 */
                ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
                    , bf->bf_state.bfs_pktlen           /* packet length */
                    , bf->bf_state.bfs_hdrlen           /* header length */
                    , bf->bf_state.bfs_atype            /* Atheros packet type */
                    , bf->bf_state.bfs_txpower          /* txpower */
                    , bf->bf_state.bfs_txrate0
                    , bf->bf_state.bfs_try0             /* series 0 rate/tries */
                    , bf->bf_state.bfs_keyix            /* key cache index */
                    , bf->bf_state.bfs_txantenna        /* antenna mode */
                    , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ      /* flags */
                    , bf->bf_state.bfs_ctsrate          /* rts/cts rate */
                    , bf->bf_state.bfs_ctsduration      /* rts/cts duration */
                );

                /*
                 * First descriptor?  Setup the rate control and initial
                 * aggregate header information.
                 */
                if (bf == bf_first) {
                        /*
                         * setup first desc with rate and aggr info
                         */
                        ath_tx_set_ratectrl(sc, bf->bf_node, bf);
                }

                /*
                 * Setup the descriptors for a multi-descriptor frame.
                 * This is both aggregate and non-aggregate aware.
                 */
                ath_tx_chaindesclist(sc, ds0, bf,
                    1, /* is_aggr */
                    !! (bf == bf_first), /* is_first_subframe */
                    !! (bf->bf_next == NULL) /* is_last_subframe */
                );

                if (bf == bf_first) {
                        /*
                         * Initialise the first 11n aggregate with the
                         * aggregate length and aggregate enable bits.
                         */
                        ath_hal_set11n_aggr_first(sc->sc_ah,
                            ds0,
                            bf->bf_state.bfs_al,
                            bf->bf_state.bfs_ndelim);
                }

                /*
                 * Link the last descriptor of the previous frame
                 * to the beginning descriptor of this frame.
                 */
                if (bf_prev != NULL)
                        ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
                            bf->bf_daddr);

                /* Save a copy so we can link the next descriptor in */
                bf_prev = bf;
                bf = bf->bf_next;
        }

        /*
         * Set the first descriptor bf_lastds field to point to
         * the last descriptor in the last subframe, that's where
         * the status update will occur.
         */
        bf_first->bf_lastds = bf_prev->bf_lastds;

        /*
         * And bf_last in the first descriptor points to the end of
         * the aggregate list.
         */
        bf_first->bf_last = bf_prev;

        /*
         * For non-AR9300 NICs, which require the rate control
         * in the final descriptor - let's set that up now.
         *
         * This is because the filltxdesc() HAL call doesn't
         * populate the last segment with rate control information
         * if firstSeg is also true.  For non-aggregate frames
         * that is fine, as the first frame already has rate control
         * info.  But if the last frame in an aggregate has one
         * descriptor, both firstseg and lastseg will be true and
         * the rate info isn't copied.
         *
         * This is inefficient on MIPS/ARM platforms that have
         * non-cacheable memory for TX descriptors, but we'll just
         * make do for now.
         *
         * As to why the rate table is stashed in the last descriptor
         * rather than the first descriptor?  Because proctxdesc()
         * is called on the final descriptor in an MPDU or A-MPDU -
         * ie, the one that gets updated by the hardware upon
         * completion.  That way proctxdesc() doesn't need to know
         * about the first _and_ last TX descriptor.
         */
        ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
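
#if 0
/*
 * Illustrative sketch (not compiled in): the linkage that
 * ath_tx_setds_11n() leaves behind.  Each bf_next hop is one
 * subframe; every subframe's bf_lastds is chained to the next
 * subframe's first descriptor, and the head buffer's bf_lastds /
 * bf_last point at the tail subframe, where TX status is written.
 */
static void
ath_tx_aggr_walk_example(struct ath_buf *bf_first)
{
        struct ath_buf *bf;

        for (bf = bf_first; bf != NULL; bf = bf->bf_next)
                printf("subframe bf=%p lastds=%p next=%p\n",
                    bf, bf->bf_lastds, bf->bf_next);
}
#endif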
710 */ 711 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 712 DPRINTF(sc, ATH_DEBUG_XMIT, 713 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 714 __func__, bf, bf->bf_state.bfs_tx_queue, 715 txq->axq_qnum); 716 } 717 718 ATH_TXQ_LOCK(txq); 719 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 720 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 721 struct ieee80211_frame *wh; 722 723 /* mark previous frame */ 724 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 725 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 726 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 727 BUS_DMASYNC_PREWRITE); 728 729 /* link descriptor */ 730 ath_hal_settxdesclink(sc->sc_ah, 731 bf_last->bf_lastds, 732 bf->bf_daddr); 733 } 734 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 735 ATH_TXQ_UNLOCK(txq); 736 } 737 738 /* 739 * Hand-off packet to a hardware queue. 740 */ 741 static void 742 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 743 struct ath_buf *bf) 744 { 745 struct ath_hal *ah = sc->sc_ah; 746 struct ath_buf *bf_first; 747 748 /* 749 * Insert the frame on the outbound list and pass it on 750 * to the hardware. Multicast frames buffered for power 751 * save stations and transmit from the CAB queue are stored 752 * on a s/w only queue and loaded on to the CAB queue in 753 * the SWBA handler since frames only go out on DTIM and 754 * to avoid possible races. 755 */ 756 ATH_TX_LOCK_ASSERT(sc); 757 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 758 ("%s: busy status 0x%x", __func__, bf->bf_flags)); 759 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 760 ("ath_tx_handoff_hw called for mcast queue")); 761 762 /* 763 * XXX We should instead just verify that sc_txstart_cnt 764 * or ath_txproc_cnt > 0. That would mean that 765 * the reset is going to be waiting for us to complete. 766 */ 767 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { 768 device_printf(sc->sc_dev, 769 "%s: TX dispatch without holding txcount/txstart refcnt!\n", 770 __func__); 771 } 772 773 /* 774 * XXX .. this is going to cause the hardware to get upset; 775 * so we really should find some way to drop or queue 776 * things. 777 */ 778 779 ATH_TXQ_LOCK(txq); 780 781 /* 782 * XXX TODO: if there's a holdingbf, then 783 * ATH_TXQ_PUTRUNNING should be clear. 784 * 785 * If there is a holdingbf and the list is empty, 786 * then axq_link should be pointing to the holdingbf. 787 * 788 * Otherwise it should point to the last descriptor 789 * in the last ath_buf. 790 * 791 * In any case, we should really ensure that we 792 * update the previous descriptor link pointer to 793 * this descriptor, regardless of all of the above state. 794 * 795 * For now this is captured by having axq_link point 796 * to either the holdingbf (if the TXQ list is empty) 797 * or the end of the list (if the TXQ list isn't empty.) 798 * I'd rather just kill axq_link here and do it as above. 799 */ 800 801 /* 802 * Append the frame to the TX queue. 803 */ 804 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 805 ATH_KTR(sc, ATH_KTR_TX, 3, 806 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 807 "depth=%d", 808 txq->axq_qnum, 809 bf, 810 txq->axq_depth); 811 812 /* 813 * If there's a link pointer, update it. 814 * 815 * XXX we should replace this with the above logic, just 816 * to kill axq_link with fire. 
817 */ 818 if (txq->axq_link != NULL) { 819 *txq->axq_link = bf->bf_daddr; 820 DPRINTF(sc, ATH_DEBUG_XMIT, 821 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 822 txq->axq_qnum, txq->axq_link, 823 (caddr_t)bf->bf_daddr, bf->bf_desc, 824 txq->axq_depth); 825 ATH_KTR(sc, ATH_KTR_TX, 5, 826 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 827 "lastds=%d", 828 txq->axq_qnum, txq->axq_link, 829 (caddr_t)bf->bf_daddr, bf->bf_desc, 830 bf->bf_lastds); 831 } 832 833 /* 834 * If we've not pushed anything into the hardware yet, 835 * push the head of the queue into the TxDP. 836 * 837 * Once we've started DMA, there's no guarantee that 838 * updating the TxDP with a new value will actually work. 839 * So we just don't do that - if we hit the end of the list, 840 * we keep that buffer around (the "holding buffer") and 841 * re-start DMA by updating the link pointer of _that_ 842 * descriptor and then restart DMA. 843 */ 844 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 845 bf_first = TAILQ_FIRST(&txq->axq_q); 846 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 847 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 848 DPRINTF(sc, ATH_DEBUG_XMIT, 849 "%s: TXDP[%u] = %p (%p) depth %d\n", 850 __func__, txq->axq_qnum, 851 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 852 txq->axq_depth); 853 ATH_KTR(sc, ATH_KTR_TX, 5, 854 "ath_tx_handoff: TXDP[%u] = %p (%p) " 855 "lastds=%p depth %d", 856 txq->axq_qnum, 857 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 858 bf_first->bf_lastds, 859 txq->axq_depth); 860 } 861 862 /* 863 * Ensure that the bf TXQ matches this TXQ, so later 864 * checking and holding buffer manipulation is sane. 865 */ 866 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 867 DPRINTF(sc, ATH_DEBUG_XMIT, 868 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 869 __func__, bf, bf->bf_state.bfs_tx_queue, 870 txq->axq_qnum); 871 } 872 873 /* 874 * Track aggregate queue depth. 875 */ 876 if (bf->bf_state.bfs_aggr) 877 txq->axq_aggr_depth++; 878 879 /* 880 * Update the link pointer. 881 */ 882 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 883 884 /* 885 * Start DMA. 886 * 887 * If we wrote a TxDP above, DMA will start from here. 888 * 889 * If DMA is running, it'll do nothing. 890 * 891 * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 892 * or VEOL) then it stops at the last transmitted write. 893 * We then append a new frame by updating the link pointer 894 * in that descriptor and then kick TxE here; it will re-read 895 * that last descriptor and find the new descriptor to transmit. 896 * 897 * This is why we keep the holding descriptor around. 898 */ 899 ath_hal_txstart(ah, txq->axq_qnum); 900 ATH_TXQ_UNLOCK(txq); 901 ATH_KTR(sc, ATH_KTR_TX, 1, 902 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 903 } 904 905 /* 906 * Restart TX DMA for the given TXQ. 907 * 908 * This must be called whether the queue is empty or not. 
909 */ 910 static void 911 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 912 { 913 struct ath_buf *bf, *bf_last; 914 915 ATH_TXQ_LOCK_ASSERT(txq); 916 917 /* XXX make this ATH_TXQ_FIRST */ 918 bf = TAILQ_FIRST(&txq->axq_q); 919 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 920 921 if (bf == NULL) 922 return; 923 924 DPRINTF(sc, ATH_DEBUG_RESET, 925 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 926 __func__, 927 txq->axq_qnum, 928 bf, 929 bf_last, 930 (uint32_t) bf->bf_daddr); 931 932 #ifdef ATH_DEBUG 933 if (sc->sc_debug & ATH_DEBUG_RESET) 934 ath_tx_dump(sc, txq); 935 #endif 936 937 /* 938 * This is called from a restart, so DMA is known to be 939 * completely stopped. 940 */ 941 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 942 ("%s: Q%d: called with PUTRUNNING=1\n", 943 __func__, 944 txq->axq_qnum)); 945 946 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 947 txq->axq_flags |= ATH_TXQ_PUTRUNNING; 948 949 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 950 &txq->axq_link); 951 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 952 } 953 954 /* 955 * Hand off a packet to the hardware (or mcast queue.) 956 * 957 * The relevant hardware txq should be locked. 958 */ 959 static void 960 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 961 struct ath_buf *bf) 962 { 963 ATH_TX_LOCK_ASSERT(sc); 964 965 #ifdef ATH_DEBUG_ALQ 966 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 967 ath_tx_alq_post(sc, bf); 968 #endif 969 970 if (txq->axq_qnum == ATH_TXQ_SWQ) 971 ath_tx_handoff_mcast(sc, txq, bf); 972 else 973 ath_tx_handoff_hw(sc, txq, bf); 974 } 975 976 static int 977 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 978 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 979 int *keyix) 980 { 981 DPRINTF(sc, ATH_DEBUG_XMIT, 982 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 983 __func__, 984 *hdrlen, 985 *pktlen, 986 isfrag, 987 iswep, 988 m0); 989 990 if (iswep) { 991 const struct ieee80211_cipher *cip; 992 struct ieee80211_key *k; 993 994 /* 995 * Construct the 802.11 header+trailer for an encrypted 996 * frame. The only reason this can fail is because of an 997 * unknown or unsupported cipher/key type. 998 */ 999 k = ieee80211_crypto_encap(ni, m0); 1000 if (k == NULL) { 1001 /* 1002 * This can happen when the key is yanked after the 1003 * frame was queued. Just discard the frame; the 1004 * 802.11 layer counts failures and provides 1005 * debugging/diagnostics. 1006 */ 1007 return (0); 1008 } 1009 /* 1010 * Adjust the packet + header lengths for the crypto 1011 * additions and calculate the h/w key index. When 1012 * a s/w mic is done the frame will have had any mic 1013 * added to it prior to entry so m0->m_pkthdr.len will 1014 * account for it. Otherwise we need to add it to the 1015 * packet length. 1016 */ 1017 cip = k->wk_cipher; 1018 (*hdrlen) += cip->ic_header; 1019 (*pktlen) += cip->ic_header + cip->ic_trailer; 1020 /* NB: frags always have any TKIP MIC done in s/w */ 1021 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1022 (*pktlen) += cip->ic_miclen; 1023 (*keyix) = k->wk_keyix; 1024 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1025 /* 1026 * Use station key cache slot, if assigned. 
1027 */ 1028 (*keyix) = ni->ni_ucastkey.wk_keyix; 1029 if ((*keyix) == IEEE80211_KEYIX_NONE) 1030 (*keyix) = HAL_TXKEYIX_INVALID; 1031 } else 1032 (*keyix) = HAL_TXKEYIX_INVALID; 1033 1034 return (1); 1035 } 1036 1037 /* 1038 * Calculate whether interoperability protection is required for 1039 * this frame. 1040 * 1041 * This requires the rate control information be filled in, 1042 * as the protection requirement depends upon the current 1043 * operating mode / PHY. 1044 */ 1045 static void 1046 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1047 { 1048 struct ieee80211_frame *wh; 1049 uint8_t rix; 1050 uint16_t flags; 1051 int shortPreamble; 1052 const HAL_RATE_TABLE *rt = sc->sc_currates; 1053 struct ieee80211com *ic = &sc->sc_ic; 1054 1055 flags = bf->bf_state.bfs_txflags; 1056 rix = bf->bf_state.bfs_rc[0].rix; 1057 shortPreamble = bf->bf_state.bfs_shpream; 1058 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1059 1060 /* Disable frame protection for TOA probe frames */ 1061 if (bf->bf_flags & ATH_BUF_TOA_PROBE) { 1062 /* XXX count */ 1063 flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA); 1064 bf->bf_state.bfs_doprot = 0; 1065 goto finish; 1066 } 1067 1068 /* 1069 * If 802.11g protection is enabled, determine whether 1070 * to use RTS/CTS or just CTS. Note that this is only 1071 * done for OFDM unicast frames. 1072 */ 1073 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1074 rt->info[rix].phy == IEEE80211_T_OFDM && 1075 (flags & HAL_TXDESC_NOACK) == 0) { 1076 bf->bf_state.bfs_doprot = 1; 1077 /* XXX fragments must use CCK rates w/ protection */ 1078 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1079 flags |= HAL_TXDESC_RTSENA; 1080 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1081 flags |= HAL_TXDESC_CTSENA; 1082 } 1083 /* 1084 * For frags it would be desirable to use the 1085 * highest CCK rate for RTS/CTS. But stations 1086 * farther away may detect it at a lower CCK rate 1087 * so use the configured protection rate instead 1088 * (for now). 1089 */ 1090 sc->sc_stats.ast_tx_protect++; 1091 } 1092 1093 /* 1094 * If 11n protection is enabled and it's a HT frame, 1095 * enable RTS. 1096 * 1097 * XXX ic_htprotmode or ic_curhtprotmode? 1098 * XXX should it_htprotmode only matter if ic_curhtprotmode 1099 * XXX indicates it's not a HT pure environment? 1100 */ 1101 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1102 rt->info[rix].phy == IEEE80211_T_HT && 1103 (flags & HAL_TXDESC_NOACK) == 0) { 1104 flags |= HAL_TXDESC_RTSENA; 1105 sc->sc_stats.ast_tx_htprotect++; 1106 } 1107 1108 finish: 1109 bf->bf_state.bfs_txflags = flags; 1110 } 1111 1112 /* 1113 * Update the frame duration given the currently selected rate. 1114 * 1115 * This also updates the frame duration value, so it will require 1116 * a DMA flush. 1117 */ 1118 static void 1119 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1120 { 1121 struct ieee80211_frame *wh; 1122 uint8_t rix; 1123 uint16_t flags; 1124 int shortPreamble; 1125 struct ath_hal *ah = sc->sc_ah; 1126 const HAL_RATE_TABLE *rt = sc->sc_currates; 1127 int isfrag = bf->bf_m->m_flags & M_FRAG; 1128 1129 flags = bf->bf_state.bfs_txflags; 1130 rix = bf->bf_state.bfs_rc[0].rix; 1131 shortPreamble = bf->bf_state.bfs_shpream; 1132 wh = mtod(bf->bf_m, struct ieee80211_frame *); 1133 1134 /* 1135 * Calculate duration. This logically belongs in the 802.11 1136 * layer but it lacks sufficient information to calculate it. 
1137 */ 1138 if ((flags & HAL_TXDESC_NOACK) == 0 && 1139 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1140 u_int16_t dur; 1141 if (shortPreamble) 1142 dur = rt->info[rix].spAckDuration; 1143 else 1144 dur = rt->info[rix].lpAckDuration; 1145 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1146 dur += dur; /* additional SIFS+ACK */ 1147 /* 1148 * Include the size of next fragment so NAV is 1149 * updated properly. The last fragment uses only 1150 * the ACK duration 1151 * 1152 * XXX TODO: ensure that the rate lookup for each 1153 * fragment is the same as the rate used by the 1154 * first fragment! 1155 */ 1156 dur += ath_hal_computetxtime(ah, 1157 rt, 1158 bf->bf_nextfraglen, 1159 rix, shortPreamble, 1160 AH_TRUE); 1161 } 1162 if (isfrag) { 1163 /* 1164 * Force hardware to use computed duration for next 1165 * fragment by disabling multi-rate retry which updates 1166 * duration based on the multi-rate duration table. 1167 */ 1168 bf->bf_state.bfs_ismrr = 0; 1169 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1170 /* XXX update bfs_rc[0].try? */ 1171 } 1172 1173 /* Update the duration field itself */ 1174 *(u_int16_t *)wh->i_dur = htole16(dur); 1175 } 1176 } 1177 1178 static uint8_t 1179 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1180 int cix, int shortPreamble) 1181 { 1182 uint8_t ctsrate; 1183 1184 /* 1185 * CTS transmit rate is derived from the transmit rate 1186 * by looking in the h/w rate table. We must also factor 1187 * in whether or not a short preamble is to be used. 1188 */ 1189 /* NB: cix is set above where RTS/CTS is enabled */ 1190 KASSERT(cix != 0xff, ("cix not setup")); 1191 ctsrate = rt->info[cix].rateCode; 1192 1193 /* XXX this should only matter for legacy rates */ 1194 if (shortPreamble) 1195 ctsrate |= rt->info[cix].shortPreamble; 1196 1197 return (ctsrate); 1198 } 1199 1200 /* 1201 * Calculate the RTS/CTS duration for legacy frames. 1202 */ 1203 static int 1204 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1205 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1206 int flags) 1207 { 1208 int ctsduration = 0; 1209 1210 /* This mustn't be called for HT modes */ 1211 if (rt->info[cix].phy == IEEE80211_T_HT) { 1212 printf("%s: HT rate where it shouldn't be (0x%x)\n", 1213 __func__, rt->info[cix].rateCode); 1214 return (-1); 1215 } 1216 1217 /* 1218 * Compute the transmit duration based on the frame 1219 * size and the size of an ACK frame. We call into the 1220 * HAL to do the computation since it depends on the 1221 * characteristics of the actual PHY being used. 1222 * 1223 * NB: CTS is assumed the same size as an ACK so we can 1224 * use the precalculated ACK durations. 1225 */ 1226 if (shortPreamble) { 1227 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1228 ctsduration += rt->info[cix].spAckDuration; 1229 ctsduration += ath_hal_computetxtime(ah, 1230 rt, pktlen, rix, AH_TRUE, AH_TRUE); 1231 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1232 ctsduration += rt->info[rix].spAckDuration; 1233 } else { 1234 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1235 ctsduration += rt->info[cix].lpAckDuration; 1236 ctsduration += ath_hal_computetxtime(ah, 1237 rt, pktlen, rix, AH_FALSE, AH_TRUE); 1238 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1239 ctsduration += rt->info[rix].lpAckDuration; 1240 } 1241 1242 return (ctsduration); 1243 } 1244 1245 /* 1246 * Update the given ath_buf with updated rts/cts setup and duration 1247 * values. 
1248 * 1249 * To support rate lookups for each software retry, the rts/cts rate 1250 * and cts duration must be re-calculated. 1251 * 1252 * This function assumes the RTS/CTS flags have been set as needed; 1253 * mrr has been disabled; and the rate control lookup has been done. 1254 * 1255 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1256 * XXX The 11n NICs support per-rate RTS/CTS configuration. 1257 */ 1258 static void 1259 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1260 { 1261 uint16_t ctsduration = 0; 1262 uint8_t ctsrate = 0; 1263 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1264 uint8_t cix = 0; 1265 const HAL_RATE_TABLE *rt = sc->sc_currates; 1266 1267 /* 1268 * No RTS/CTS enabled? Don't bother. 1269 */ 1270 if ((bf->bf_state.bfs_txflags & 1271 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1272 /* XXX is this really needed? */ 1273 bf->bf_state.bfs_ctsrate = 0; 1274 bf->bf_state.bfs_ctsduration = 0; 1275 return; 1276 } 1277 1278 /* 1279 * If protection is enabled, use the protection rix control 1280 * rate. Otherwise use the rate0 control rate. 1281 */ 1282 if (bf->bf_state.bfs_doprot) 1283 rix = sc->sc_protrix; 1284 else 1285 rix = bf->bf_state.bfs_rc[0].rix; 1286 1287 /* 1288 * If the raw path has hard-coded ctsrate0 to something, 1289 * use it. 1290 */ 1291 if (bf->bf_state.bfs_ctsrate0 != 0) 1292 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1293 else 1294 /* Control rate from above */ 1295 cix = rt->info[rix].controlRate; 1296 1297 /* Calculate the rtscts rate for the given cix */ 1298 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1299 bf->bf_state.bfs_shpream); 1300 1301 /* The 11n chipsets do ctsduration calculations for you */ 1302 if (! ath_tx_is_11n(sc)) 1303 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1304 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1305 rt, bf->bf_state.bfs_txflags); 1306 1307 /* Squirrel away in ath_buf */ 1308 bf->bf_state.bfs_ctsrate = ctsrate; 1309 bf->bf_state.bfs_ctsduration = ctsduration; 1310 1311 /* 1312 * Must disable multi-rate retry when using RTS/CTS. 1313 */ 1314 if (!sc->sc_mrrprot) { 1315 bf->bf_state.bfs_ismrr = 0; 1316 bf->bf_state.bfs_try0 = 1317 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1318 } 1319 } 1320 1321 /* 1322 * Setup the descriptor chain for a normal or fast-frame 1323 * frame. 1324 * 1325 * XXX TODO: extend to include the destination hardware QCU ID. 1326 * Make sure that is correct. Make sure that when being added 1327 * to the mcastq, the CABQ QCUID is set or things will get a bit 1328 * odd. 
1329 */ 1330 static void 1331 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1332 { 1333 struct ath_desc *ds = bf->bf_desc; 1334 struct ath_hal *ah = sc->sc_ah; 1335 1336 if (bf->bf_state.bfs_txrate0 == 0) 1337 DPRINTF(sc, ATH_DEBUG_XMIT, 1338 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1339 1340 ath_hal_setuptxdesc(ah, ds 1341 , bf->bf_state.bfs_pktlen /* packet length */ 1342 , bf->bf_state.bfs_hdrlen /* header length */ 1343 , bf->bf_state.bfs_atype /* Atheros packet type */ 1344 , bf->bf_state.bfs_txpower /* txpower */ 1345 , bf->bf_state.bfs_txrate0 1346 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1347 , bf->bf_state.bfs_keyix /* key cache index */ 1348 , bf->bf_state.bfs_txantenna /* antenna mode */ 1349 , bf->bf_state.bfs_txflags /* flags */ 1350 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1351 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1352 ); 1353 1354 /* 1355 * This will be overriden when the descriptor chain is written. 1356 */ 1357 bf->bf_lastds = ds; 1358 bf->bf_last = bf; 1359 1360 /* Set rate control and descriptor chain for this frame */ 1361 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1362 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1363 } 1364 1365 /* 1366 * Do a rate lookup. 1367 * 1368 * This performs a rate lookup for the given ath_buf only if it's required. 1369 * Non-data frames and raw frames don't require it. 1370 * 1371 * This populates the primary and MRR entries; MRR values are 1372 * then disabled later on if something requires it (eg RTS/CTS on 1373 * pre-11n chipsets. 1374 * 1375 * This needs to be done before the RTS/CTS fields are calculated 1376 * as they may depend upon the rate chosen. 1377 */ 1378 static void 1379 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid, 1380 int pktlen, int is_aggr) 1381 { 1382 uint8_t rate, rix; 1383 int try0; 1384 int maxdur; // Note: Unused for now 1385 int maxpktlen; 1386 1387 if (! bf->bf_state.bfs_doratelookup) 1388 return; 1389 1390 /* Get rid of any previous state */ 1391 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1392 1393 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1394 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1395 pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen); 1396 1397 /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1398 bf->bf_state.bfs_rc[0].rix = rix; 1399 bf->bf_state.bfs_rc[0].ratecode = rate; 1400 bf->bf_state.bfs_rc[0].tries = try0; 1401 1402 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1403 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1404 is_aggr, bf->bf_state.bfs_rc); 1405 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1406 1407 sc->sc_txrix = rix; /* for LED blinking */ 1408 sc->sc_lastdatarix = rix; /* for fast frames */ 1409 bf->bf_state.bfs_try0 = try0; 1410 bf->bf_state.bfs_txrate0 = rate; 1411 bf->bf_state.bfs_rc_maxpktlen = maxpktlen; 1412 } 1413 1414 /* 1415 * Update the CLRDMASK bit in the ath_buf if it needs to be set. 1416 */ 1417 static void 1418 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1419 struct ath_buf *bf) 1420 { 1421 struct ath_node *an = ATH_NODE(bf->bf_node); 1422 1423 ATH_TX_LOCK_ASSERT(sc); 1424 1425 if (an->clrdmask == 1) { 1426 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1427 an->clrdmask = 0; 1428 } 1429 } 1430 1431 /* 1432 * Return whether this frame should be software queued or 1433 * direct dispatched. 
1434 * 1435 * When doing powersave, BAR frames should be queued but other management 1436 * frames should be directly sent. 1437 * 1438 * When not doing powersave, stick BAR frames into the hardware queue 1439 * so it goes out even though the queue is paused. 1440 * 1441 * For now, management frames are also software queued by default. 1442 */ 1443 static int 1444 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, 1445 struct mbuf *m0, int *queue_to_head) 1446 { 1447 struct ieee80211_node *ni = &an->an_node; 1448 struct ieee80211_frame *wh; 1449 uint8_t type, subtype; 1450 1451 wh = mtod(m0, struct ieee80211_frame *); 1452 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1453 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1454 1455 (*queue_to_head) = 0; 1456 1457 /* If it's not in powersave - direct-dispatch BAR */ 1458 if ((ATH_NODE(ni)->an_is_powersave == 0) 1459 && type == IEEE80211_FC0_TYPE_CTL && 1460 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1461 DPRINTF(sc, ATH_DEBUG_SW_TX, 1462 "%s: BAR: TX'ing direct\n", __func__); 1463 return (0); 1464 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1465 && type == IEEE80211_FC0_TYPE_CTL && 1466 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1467 /* BAR TX whilst asleep; queue */ 1468 DPRINTF(sc, ATH_DEBUG_SW_TX, 1469 "%s: swq: TX'ing\n", __func__); 1470 (*queue_to_head) = 1; 1471 return (1); 1472 } else if ((ATH_NODE(ni)->an_is_powersave == 1) 1473 && (type == IEEE80211_FC0_TYPE_MGT || 1474 type == IEEE80211_FC0_TYPE_CTL)) { 1475 /* 1476 * Other control/mgmt frame; bypass software queuing 1477 * for now! 1478 */ 1479 DPRINTF(sc, ATH_DEBUG_XMIT, 1480 "%s: %6D: Node is asleep; sending mgmt " 1481 "(type=%d, subtype=%d)\n", 1482 __func__, ni->ni_macaddr, ":", type, subtype); 1483 return (0); 1484 } else { 1485 return (1); 1486 } 1487 } 1488 1489 1490 /* 1491 * Transmit the given frame to the hardware. 1492 * 1493 * The frame must already be setup; rate control must already have 1494 * been done. 1495 * 1496 * XXX since the TXQ lock is being held here (and I dislike holding 1497 * it for this long when not doing software aggregation), later on 1498 * break this function into "setup_normal" and "xmit_normal". The 1499 * lock only needs to be held for the ath_tx_handoff call. 1500 * 1501 * XXX we don't update the leak count here - if we're doing 1502 * direct frame dispatch, we need to be able to do it without 1503 * decrementing the leak count (eg multicast queue frames.) 1504 */ 1505 static void 1506 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1507 struct ath_buf *bf) 1508 { 1509 struct ath_node *an = ATH_NODE(bf->bf_node); 1510 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 1511 1512 ATH_TX_LOCK_ASSERT(sc); 1513 1514 /* 1515 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does 1516 * set a completion handler however it doesn't (yet) properly 1517 * handle the strict ordering requirements needed for normal, 1518 * non-aggregate session frames. 1519 * 1520 * Once this is implemented, only set CLRDMASK like this for 1521 * frames that must go out - eg management/raw frames. 
1522 */ 1523 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1524 1525 /* Setup the descriptor before handoff */ 1526 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); 1527 ath_tx_calc_duration(sc, bf); 1528 ath_tx_calc_protection(sc, bf); 1529 ath_tx_set_rtscts(sc, bf); 1530 ath_tx_rate_fill_rcflags(sc, bf); 1531 ath_tx_setds(sc, bf); 1532 1533 /* Track per-TID hardware queue depth correctly */ 1534 tid->hwq_depth++; 1535 1536 /* Assign the completion handler */ 1537 bf->bf_comp = ath_tx_normal_comp; 1538 1539 /* Hand off to hardware */ 1540 ath_tx_handoff(sc, txq, bf); 1541 } 1542 1543 /* 1544 * Do the basic frame setup stuff that's required before the frame 1545 * is added to a software queue. 1546 * 1547 * All frames get mostly the same treatment and it's done once. 1548 * Retransmits fiddle with things like the rate control setup, 1549 * setting the retransmit bit in the packet; doing relevant DMA/bus 1550 * syncing and relinking it (back) into the hardware TX queue. 1551 * 1552 * Note that this may cause the mbuf to be reallocated, so 1553 * m0 may not be valid. 1554 */ 1555 static int 1556 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1557 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1558 { 1559 struct ieee80211vap *vap = ni->ni_vap; 1560 struct ieee80211com *ic = &sc->sc_ic; 1561 int error, iswep, ismcast, isfrag, ismrr; 1562 int keyix, hdrlen, pktlen, try0 = 0; 1563 u_int8_t rix = 0, txrate = 0; 1564 struct ath_desc *ds; 1565 struct ieee80211_frame *wh; 1566 u_int subtype, flags; 1567 HAL_PKT_TYPE atype; 1568 const HAL_RATE_TABLE *rt; 1569 HAL_BOOL shortPreamble; 1570 struct ath_node *an; 1571 1572 /* XXX TODO: this pri is only used for non-QoS check, right? */ 1573 u_int pri; 1574 1575 /* 1576 * To ensure that both sequence numbers and the CCMP PN handling 1577 * is "correct", make sure that the relevant TID queue is locked. 1578 * Otherwise the CCMP PN and seqno may appear out of order, causing 1579 * re-ordered frames to have out of order CCMP PN's, resulting 1580 * in many, many frame drops. 1581 */ 1582 ATH_TX_LOCK_ASSERT(sc); 1583 1584 wh = mtod(m0, struct ieee80211_frame *); 1585 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; 1586 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1587 isfrag = m0->m_flags & M_FRAG; 1588 hdrlen = ieee80211_anyhdrsize(wh); 1589 /* 1590 * Packet length must not include any 1591 * pad bytes; deduct them here. 1592 */ 1593 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1594 1595 /* Handle encryption twiddling if needed */ 1596 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1597 &pktlen, &keyix)) { 1598 ieee80211_free_mbuf(m0); 1599 return EIO; 1600 } 1601 1602 /* packet header may have moved, reset our local pointer */ 1603 wh = mtod(m0, struct ieee80211_frame *); 1604 1605 pktlen += IEEE80211_CRC_LEN; 1606 1607 /* 1608 * Load the DMA map so any coalescing is done. This 1609 * also calculates the number of descriptors we need. 
1610 */ 1611 error = ath_tx_dmasetup(sc, bf, m0); 1612 if (error != 0) 1613 return error; 1614 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 1615 bf->bf_node = ni; /* NB: held reference */ 1616 m0 = bf->bf_m; /* NB: may have changed */ 1617 wh = mtod(m0, struct ieee80211_frame *); 1618 1619 /* setup descriptors */ 1620 ds = bf->bf_desc; 1621 rt = sc->sc_currates; 1622 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1623 1624 /* 1625 * NB: the 802.11 layer marks whether or not we should 1626 * use short preamble based on the current mode and 1627 * negotiated parameters. 1628 */ 1629 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1630 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1631 shortPreamble = AH_TRUE; 1632 sc->sc_stats.ast_tx_shortpre++; 1633 } else { 1634 shortPreamble = AH_FALSE; 1635 } 1636 1637 an = ATH_NODE(ni); 1638 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1639 flags = 0; 1640 ismrr = 0; /* default no multi-rate retry*/ 1641 1642 pri = ath_tx_getac(sc, m0); /* honor classification */ 1643 /* XXX use txparams instead of fixed values */ 1644 /* 1645 * Calculate Atheros packet type from IEEE80211 packet header, 1646 * setup for rate calculations, and select h/w transmit queue. 1647 */ 1648 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1649 case IEEE80211_FC0_TYPE_MGT: 1650 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1651 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1652 atype = HAL_PKT_TYPE_BEACON; 1653 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1654 atype = HAL_PKT_TYPE_PROBE_RESP; 1655 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1656 atype = HAL_PKT_TYPE_ATIM; 1657 else 1658 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1659 rix = an->an_mgmtrix; 1660 txrate = rt->info[rix].rateCode; 1661 if (shortPreamble) 1662 txrate |= rt->info[rix].shortPreamble; 1663 try0 = ATH_TXMGTTRY; 1664 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1665 break; 1666 case IEEE80211_FC0_TYPE_CTL: 1667 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1668 rix = an->an_mgmtrix; 1669 txrate = rt->info[rix].rateCode; 1670 if (shortPreamble) 1671 txrate |= rt->info[rix].shortPreamble; 1672 try0 = ATH_TXMGTTRY; 1673 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1674 break; 1675 case IEEE80211_FC0_TYPE_DATA: 1676 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1677 /* 1678 * Data frames: multicast frames go out at a fixed rate, 1679 * EAPOL frames use the mgmt frame rate; otherwise consult 1680 * the rate control module for the rate to use. 1681 */ 1682 if (ismcast) { 1683 rix = an->an_mcastrix; 1684 txrate = rt->info[rix].rateCode; 1685 if (shortPreamble) 1686 txrate |= rt->info[rix].shortPreamble; 1687 try0 = 1; 1688 } else if (m0->m_flags & M_EAPOL) { 1689 /* XXX? maybe always use long preamble? */ 1690 rix = an->an_mgmtrix; 1691 txrate = rt->info[rix].rateCode; 1692 if (shortPreamble) 1693 txrate |= rt->info[rix].shortPreamble; 1694 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1695 } else { 1696 /* 1697 * Do rate lookup on each TX, rather than using 1698 * the hard-coded TX information decided here. 1699 */ 1700 ismrr = 1; 1701 bf->bf_state.bfs_doratelookup = 1; 1702 } 1703 1704 /* 1705 * Check whether to set NOACK for this WME category or not. 
1706 */ 1707 if (ieee80211_wme_vap_ac_is_noack(vap, pri)) 1708 flags |= HAL_TXDESC_NOACK; 1709 break; 1710 default: 1711 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", 1712 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1713 /* XXX statistic */ 1714 /* XXX free tx dmamap */ 1715 ieee80211_free_mbuf(m0); 1716 return EIO; 1717 } 1718 1719 /* 1720 * There are two known scenarios where the frame AC doesn't match 1721 * what the destination TXQ is. 1722 * 1723 * + non-QoS frames (eg management?) that the net80211 stack has 1724 * assigned a higher AC to, but since it's a non-QoS TID, it's 1725 * being thrown into TID 16. TID 16 gets the AC_BE queue. 1726 * It's quite possible that management frames should just be 1727 * direct dispatched to hardware rather than go via the software 1728 * queue; that should be investigated in the future. There are 1729 * some specific scenarios where this doesn't make sense, mostly 1730 * surrounding ADDBA request/response - hence why that is special 1731 * cased. 1732 * 1733 * + Multicast frames going into the VAP mcast queue. That shows up 1734 * as "TXQ 11". 1735 * 1736 * This driver should eventually support separate TID and TXQ locking, 1737 * allowing for arbitrary AC frames to appear on arbitrary software 1738 * queues, being queued to the "correct" hardware queue when needed. 1739 */ 1740 #if 0 1741 if (txq != sc->sc_ac2q[pri]) { 1742 DPRINTF(sc, ATH_DEBUG_XMIT, 1743 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", 1744 __func__, 1745 txq, 1746 txq->axq_qnum, 1747 pri, 1748 sc->sc_ac2q[pri], 1749 sc->sc_ac2q[pri]->axq_qnum); 1750 } 1751 #endif 1752 1753 /* 1754 * Calculate miscellaneous flags. 1755 */ 1756 if (ismcast) { 1757 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ 1758 } else if (pktlen > vap->iv_rtsthreshold && 1759 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { 1760 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ 1761 sc->sc_stats.ast_tx_rts++; 1762 } 1763 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ 1764 sc->sc_stats.ast_tx_noack++; 1765 #ifdef IEEE80211_SUPPORT_TDMA 1766 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { 1767 DPRINTF(sc, ATH_DEBUG_TDMA, 1768 "%s: discard frame, ACK required w/ TDMA\n", __func__); 1769 sc->sc_stats.ast_tdma_ack++; 1770 /* XXX free tx dmamap */ 1771 ieee80211_free_mbuf(m0); 1772 return EIO; 1773 } 1774 #endif 1775 1776 /* 1777 * If it's a frame to do location reporting on, 1778 * communicate it to the HAL. 1779 */ 1780 if (ieee80211_get_toa_params(m0, NULL)) { 1781 device_printf(sc->sc_dev, 1782 "%s: setting TX positioning bit\n", __func__); 1783 flags |= HAL_TXDESC_POS; 1784 1785 /* 1786 * Note: The hardware reports timestamps for 1787 * each of the RX'ed packets as part of the packet 1788 * exchange. So this means things like RTS/CTS 1789 * exchanges, as well as the final ACK. 1790 * 1791 * So, if you send a RTS-protected NULL data frame, 1792 * you'll get an RX report for the RTS response, then 1793 * an RX report for the NULL frame, and then the TX 1794 * completion at the end. 1795 * 1796 * NOTE: it doesn't work right for CCK frames; 1797 * there's no channel info data provided unless 1798 * it's OFDM or HT. Will have to dig into it. 1799 */ 1800 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); 1801 bf->bf_flags |= ATH_BUF_TOA_PROBE; 1802 } 1803 1804 #if 0 1805 /* 1806 * Placeholder: if you want to transmit with the azimuth 1807 * timestamp in the end of the payload, here's where you 1808 * should set the TXDESC field. 
1809  */
1810         flags |= HAL_TXDESC_HWTS;
1811 #endif
1812
1813         /*
1814          * Determine if a tx interrupt should be generated for
1815          * this descriptor.  We take a tx interrupt to reap
1816          * descriptors when the h/w hits an EOL condition or
1817          * when the descriptor is specifically marked to generate
1818          * an interrupt.  We periodically mark descriptors in this
1819          * way to ensure timely replenishing of the supply needed
1820          * for sending frames.  Deferring interrupts reduces system
1821          * load and potentially allows more concurrent work to be
1822          * done, but if done too aggressively it can cause senders
1823          * to back up.
1824          *
1825          * NB: use >= to deal with sc_txintrperiod changing
1826          * dynamically through sysctl.
1827          */
1828         if (flags & HAL_TXDESC_INTREQ) {
1829                 txq->axq_intrcnt = 0;
1830         } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1831                 flags |= HAL_TXDESC_INTREQ;
1832                 txq->axq_intrcnt = 0;
1833         }
1834
1835         /* This point forward is actual TX bits */
1836
1837         /*
1838          * At this point we are committed to sending the frame
1839          * and we don't need to look at m_nextpkt; clear it in
1840          * case this frame is part of a frag chain.
1841          */
1842         m0->m_nextpkt = NULL;
1843
1844         if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1845                 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1846                     sc->sc_hwmap[rix].ieeerate, -1);
1847
1848         if (ieee80211_radiotap_active_vap(vap)) {
1849                 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1850                 if (iswep)
1851                         sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1852                 if (isfrag)
1853                         sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1854                 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1855                 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1856                 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1857
1858                 ieee80211_radiotap_tx(vap, m0);
1859         }
1860
1861         /* Blank the legacy rate array */
1862         bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1863
1864         /*
1865          * ath_buf_set_rate needs at least one rate/try to setup
1866          * the rate scenario.
1867          */
1868         bf->bf_state.bfs_rc[0].rix = rix;
1869         bf->bf_state.bfs_rc[0].tries = try0;
1870         bf->bf_state.bfs_rc[0].ratecode = txrate;
1871
1872         /* Store the decided rate index values away */
1873         bf->bf_state.bfs_pktlen = pktlen;
1874         bf->bf_state.bfs_hdrlen = hdrlen;
1875         bf->bf_state.bfs_atype = atype;
1876         bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1877         bf->bf_state.bfs_txrate0 = txrate;
1878         bf->bf_state.bfs_try0 = try0;
1879         bf->bf_state.bfs_keyix = keyix;
1880         bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1881         bf->bf_state.bfs_txflags = flags;
1882         bf->bf_state.bfs_shpream = shortPreamble;
1883
1884         /* XXX this should be done in ath_tx_setrate() */
1885         bf->bf_state.bfs_ctsrate0 = 0;  /* ie, no hard-coded ctsrate */
1886         bf->bf_state.bfs_ctsrate = 0;   /* calculated later */
1887         bf->bf_state.bfs_ctsduration = 0;
1888         bf->bf_state.bfs_ismrr = ismrr;
1889
1890         return 0;
1891 }
1892
1893 /*
1894  * Queue a frame to the hardware or software queue.
1895  *
1896  * This can be called by the net80211 code.
1897  *
1898  * XXX what about locking? Or, push the seqno assign into the
1899  * XXX aggregate scheduler so it's serialised?
1900  *
1901  * XXX When sending management frames via ath_raw_xmit(),
1902  * should CLRDMASK be set unconditionally?
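 *
 * (Background, as understood here: CLRDMASK clears the hardware's
 * per-destination TX filter bit.  That bit gets set when a frame
 * to a station fails, causing subsequent frames to be "filtered";
 * setting CLRDMASK on the next frame lets traffic flow again,
 * which is why the filtered-frames code toggles it.)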
1903 */ 1904 int 1905 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, 1906 struct ath_buf *bf, struct mbuf *m0) 1907 { 1908 struct ieee80211vap *vap = ni->ni_vap; 1909 struct ath_vap *avp = ATH_VAP(vap); 1910 int r = 0; 1911 u_int pri; 1912 int tid; 1913 struct ath_txq *txq; 1914 int ismcast; 1915 const struct ieee80211_frame *wh; 1916 int is_ampdu, is_ampdu_tx, is_ampdu_pending; 1917 ieee80211_seq seqno; 1918 uint8_t type, subtype; 1919 int queue_to_head; 1920 1921 ATH_TX_LOCK_ASSERT(sc); 1922 1923 /* 1924 * Determine the target hardware queue. 1925 * 1926 * For multicast frames, the txq gets overridden appropriately 1927 * depending upon the state of PS. If powersave is enabled 1928 * then they get added to the cabq for later transmit. 1929 * 1930 * The "fun" issue here is that group addressed frames should 1931 * have the sequence number from a different pool, rather than 1932 * the per-TID pool. That means that even QoS group addressed 1933 * frames will have a sequence number from that global value, 1934 * which means if we transmit different group addressed frames 1935 * at different traffic priorities, the sequence numbers will 1936 * all be out of whack. So - chances are, the right thing 1937 * to do here is to always put group addressed frames into the BE 1938 * queue, and ignore the TID for queue selection. 1939 * 1940 * For any other frame, we do a TID/QoS lookup inside the frame 1941 * to see what the TID should be. If it's a non-QoS frame, the 1942 * AC and TID are overridden. The TID/TXQ code assumes the 1943 * TID is on a predictable hardware TXQ, so we don't support 1944 * having a node TID queued to multiple hardware TXQs. 1945 * This may change in the future but would require some locking 1946 * fudgery. 1947 */ 1948 pri = ath_tx_getac(sc, m0); 1949 tid = ath_tx_gettid(sc, m0); 1950 1951 txq = sc->sc_ac2q[pri]; 1952 wh = mtod(m0, struct ieee80211_frame *); 1953 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1954 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 1955 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1956 1957 /* 1958 * Enforce how deep the multicast queue can grow. 1959 * 1960 * XXX duplicated in ath_raw_xmit(). 1961 */ 1962 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1963 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 1964 > sc->sc_txq_mcastq_maxdepth) { 1965 sc->sc_stats.ast_tx_mcastq_overflow++; 1966 m_freem(m0); 1967 return (ENOBUFS); 1968 } 1969 } 1970 1971 /* 1972 * Enforce how deep the unicast queue can grow. 1973 * 1974 * If the node is in power save then we don't want 1975 * the software queue to grow too deep, or a node may 1976 * end up consuming all of the ath_buf entries. 1977 * 1978 * For now, only do this for DATA frames. 1979 * 1980 * We will want to cap how many management/control 1981 * frames get punted to the software queue so it doesn't 1982 * fill up. But the correct solution isn't yet obvious. 1983 * In any case, this check should at least let frames pass 1984 * that we are direct-dispatching. 1985 * 1986 * XXX TODO: duplicate this to the raw xmit path! 
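 *
 * (Concretely: a station in power save may hold at most
 * sc_txq_node_psq_maxdepth data frames in its software queue;
 * the check below drops anything beyond that with ENOBUFS, so a
 * single sleeping node can't pin the whole ath_buf pool.)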
1987  */
1988         if (type == IEEE80211_FC0_TYPE_DATA &&
1989             ATH_NODE(ni)->an_is_powersave &&
1990             ATH_NODE(ni)->an_swq_depth >
1991             sc->sc_txq_node_psq_maxdepth) {
1992                 sc->sc_stats.ast_tx_node_psq_overflow++;
1993                 m_freem(m0);
1994                 return (ENOBUFS);
1995         }
1996
1997         /* A-MPDU TX */
1998         is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1999         is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
2000         is_ampdu = is_ampdu_tx | is_ampdu_pending;
2001
2002         DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
2003             __func__, tid, pri, is_ampdu);
2004
2005         /* Set local packet state, used to queue packets to hardware */
2006         bf->bf_state.bfs_tid = tid;
2007         bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2008         bf->bf_state.bfs_pri = pri;
2009
2010 #if 1
2011         /*
2012          * When servicing one or more stations in power-save mode,
2013          * or if there is some mcast data waiting on the mcast
2014          * queue (to prevent out of order delivery), multicast frames
2015          * must be buffered until after the beacon.
2016          *
2017          * TODO: we should lock the mcastq before we check the length.
2018          */
2019         if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2020                 txq = &avp->av_mcastq;
2021                 /*
2022                  * Mark the frame as eventually belonging on the CAB
2023                  * queue, so the descriptor setup functions will
2024                  * correctly initialise the descriptor 'qcuId' field.
2025                  */
2026                 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2027         }
2028 #endif
2029
2030         /* Do the generic frame setup */
2031         /* XXX should just bzero the bf_state? */
2032         bf->bf_state.bfs_dobaw = 0;
2033
2034         /* A-MPDU TX? Manually set sequence number */
2035         /*
2036          * Don't do it whilst pending; the net80211 layer still
2037          * assigns them.
2038          *
2039          * Don't assign A-MPDU sequence numbers to group address
2040          * frames; they come from a different sequence number space.
2041          */
2042         if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2043                 /*
2044                  * Always call; this function will
2045                  * handle making sure that null data frames
2046                  * and group-addressed frames don't get a sequence number
2047                  * from the current TID and thus mess with the BAW.
2048                  */
2049                 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2050
2051                 /*
2052                  * Don't add QoS NULL frames and group-addressed frames
2053                  * to the BAW.
2054                  */
2055                 if (IEEE80211_QOS_HAS_SEQ(wh) &&
2056                     (! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2057                     (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
2058                         bf->bf_state.bfs_dobaw = 1;
2059                 }
2060         }
2061
2062         /*
2063          * If needed, the sequence number has been assigned.
2064          * Squirrel it away somewhere easy to get to.
2065          */
2066         bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2067
2068         /* Is ampdu pending? fetch the seqno and print it out */
2069         if (is_ampdu_pending)
2070                 DPRINTF(sc, ATH_DEBUG_SW_TX,
2071                     "%s: tid %d: ampdu pending, seqno %d\n",
2072                     __func__, tid, M_SEQNO_GET(m0));
2073
2074         /* This also sets up the DMA map; crypto; frame parameters, etc */
2075         r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2076
2077         if (r != 0)
2078                 goto done;
2079
2080         /* At this point m0 could have changed! */
2081         m0 = bf->bf_m;
2082
2083 #if 1
2084         /*
2085          * If it's a multicast frame, do a direct-dispatch to the
2086          * destination hardware queue. Don't bother software
2087          * queuing it.
2088          */
2089         /*
2090          * If it's a BAR frame, do a direct dispatch to the
2091          * destination hardware queue. Don't bother software
2092          * queuing it, as the TID will now be paused.
2093 * Sending a BAR frame can occur from the net80211 txa timer 2094 * (ie, retries) or from the ath txtask (completion call.) 2095 * It queues directly to hardware because the TID is paused 2096 * at this point (and won't be unpaused until the BAR has 2097 * either been TXed successfully or max retries has been 2098 * reached.) 2099 */ 2100 /* 2101 * Until things are better debugged - if this node is asleep 2102 * and we're sending it a non-BAR frame, direct dispatch it. 2103 * Why? Because we need to figure out what's actually being 2104 * sent - eg, during reassociation/reauthentication after 2105 * the node (last) disappeared whilst asleep, the driver should 2106 * have unpaused/unsleep'ed the node. So until that is 2107 * sorted out, use this workaround. 2108 */ 2109 if (txq == &avp->av_mcastq) { 2110 DPRINTF(sc, ATH_DEBUG_SW_TX, 2111 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 2112 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2113 ath_tx_xmit_normal(sc, txq, bf); 2114 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2115 &queue_to_head)) { 2116 ath_tx_swq(sc, ni, txq, queue_to_head, bf); 2117 } else { 2118 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2119 ath_tx_xmit_normal(sc, txq, bf); 2120 } 2121 #else 2122 /* 2123 * For now, since there's no software queue, 2124 * direct-dispatch to the hardware. 2125 */ 2126 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2127 /* 2128 * Update the current leak count if 2129 * we're leaking frames; and set the 2130 * MORE flag as appropriate. 2131 */ 2132 ath_tx_leak_count_update(sc, tid, bf); 2133 ath_tx_xmit_normal(sc, txq, bf); 2134 #endif 2135 done: 2136 return 0; 2137 } 2138 2139 static int 2140 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2141 struct ath_buf *bf, struct mbuf *m0, 2142 const struct ieee80211_bpf_params *params) 2143 { 2144 struct ieee80211com *ic = &sc->sc_ic; 2145 struct ieee80211vap *vap = ni->ni_vap; 2146 int error, ismcast, ismrr; 2147 int keyix, hdrlen, pktlen, try0, txantenna; 2148 u_int8_t rix, txrate; 2149 struct ieee80211_frame *wh; 2150 u_int flags; 2151 HAL_PKT_TYPE atype; 2152 const HAL_RATE_TABLE *rt; 2153 struct ath_desc *ds; 2154 u_int pri; 2155 int o_tid = -1; 2156 int do_override; 2157 uint8_t type, subtype; 2158 int queue_to_head; 2159 struct ath_node *an = ATH_NODE(ni); 2160 2161 ATH_TX_LOCK_ASSERT(sc); 2162 2163 wh = mtod(m0, struct ieee80211_frame *); 2164 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2165 hdrlen = ieee80211_anyhdrsize(wh); 2166 /* 2167 * Packet length must not include any 2168 * pad bytes; deduct them here. 2169 */ 2170 /* XXX honor IEEE80211_BPF_DATAPAD */ 2171 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2172 2173 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2174 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2175 2176 ATH_KTR(sc, ATH_KTR_TX, 2, 2177 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2178 2179 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2180 __func__, ismcast); 2181 2182 pri = params->ibp_pri & 3; 2183 /* Override pri if the frame isn't a QoS one */ 2184 if (! 
IEEE80211_QOS_HAS_SEQ(wh)) 2185 pri = ath_tx_getac(sc, m0); 2186 2187 /* XXX If it's an ADDBA, override the correct queue */ 2188 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2189 2190 /* Map ADDBA to the correct priority */ 2191 if (do_override) { 2192 #if 1 2193 DPRINTF(sc, ATH_DEBUG_XMIT, 2194 "%s: overriding tid %d pri %d -> %d\n", 2195 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2196 #endif 2197 pri = TID_TO_WME_AC(o_tid); 2198 } 2199 2200 /* 2201 * "pri" is the hardware queue to transmit on. 2202 * 2203 * Look at the description in ath_tx_start() to understand 2204 * what needs to be "fixed" here so we just use the TID 2205 * for QoS frames. 2206 */ 2207 2208 /* Handle encryption twiddling if needed */ 2209 if (! ath_tx_tag_crypto(sc, ni, 2210 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2211 &hdrlen, &pktlen, &keyix)) { 2212 ieee80211_free_mbuf(m0); 2213 return EIO; 2214 } 2215 /* packet header may have moved, reset our local pointer */ 2216 wh = mtod(m0, struct ieee80211_frame *); 2217 2218 /* Do the generic frame setup */ 2219 /* XXX should just bzero the bf_state? */ 2220 bf->bf_state.bfs_dobaw = 0; 2221 2222 error = ath_tx_dmasetup(sc, bf, m0); 2223 if (error != 0) 2224 return error; 2225 m0 = bf->bf_m; /* NB: may have changed */ 2226 wh = mtod(m0, struct ieee80211_frame *); 2227 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); 2228 bf->bf_node = ni; /* NB: held reference */ 2229 2230 /* Always enable CLRDMASK for raw frames for now.. */ 2231 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2232 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2233 if (params->ibp_flags & IEEE80211_BPF_RTS) 2234 flags |= HAL_TXDESC_RTSENA; 2235 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2236 /* XXX assume 11g/11n protection? */ 2237 bf->bf_state.bfs_doprot = 1; 2238 flags |= HAL_TXDESC_CTSENA; 2239 } 2240 /* XXX leave ismcast to injector? */ 2241 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2242 flags |= HAL_TXDESC_NOACK; 2243 2244 rt = sc->sc_currates; 2245 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2246 2247 /* Fetch first rate information */ 2248 rix = ath_tx_findrix(sc, params->ibp_rate0); 2249 try0 = params->ibp_try0; 2250 2251 /* 2252 * Override EAPOL rate as appropriate. 2253 */ 2254 if (m0->m_flags & M_EAPOL) { 2255 /* XXX? maybe always use long preamble? */ 2256 rix = an->an_mgmtrix; 2257 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 2258 } 2259 2260 /* 2261 * If it's a frame to do location reporting on, 2262 * communicate it to the HAL. 2263 */ 2264 if (ieee80211_get_toa_params(m0, NULL)) { 2265 device_printf(sc->sc_dev, 2266 "%s: setting TX positioning bit\n", __func__); 2267 flags |= HAL_TXDESC_POS; 2268 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); 2269 bf->bf_flags |= ATH_BUF_TOA_PROBE; 2270 } 2271 2272 txrate = rt->info[rix].rateCode; 2273 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2274 txrate |= rt->info[rix].shortPreamble; 2275 sc->sc_txrix = rix; 2276 ismrr = (params->ibp_try1 != 0); 2277 txantenna = params->ibp_pri >> 2; 2278 if (txantenna == 0) /* XXX? */ 2279 txantenna = sc->sc_txantenna; 2280 2281 /* 2282 * Since ctsrate is fixed, store it away for later 2283 * use when the descriptor fields are being set. 2284 */ 2285 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2286 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2287 2288 /* 2289 * NB: we mark all packets as type PSPOLL so the h/w won't 2290 * set the sequence number, duration, etc. 
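 *
 * (PSPOLL is used purely as a "hands off" packet type for these
 * raw/injected frames; the injector is expected to have filled
 * in the sequence number and duration fields itself.)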
2291  */
2292         atype = HAL_PKT_TYPE_PSPOLL;
2293
2294         if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2295                 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2296                     sc->sc_hwmap[rix].ieeerate, -1);
2297
2298         if (ieee80211_radiotap_active_vap(vap)) {
2299                 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2300                 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2301                         sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2302                 if (m0->m_flags & M_FRAG)
2303                         sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2304                 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2305                 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2306                     ieee80211_get_node_txpower(ni));
2307                 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2308
2309                 ieee80211_radiotap_tx(vap, m0);
2310         }
2311
2312         /*
2313          * Formulate first tx descriptor with tx controls.
2314          */
2315         ds = bf->bf_desc;
2316         /* XXX check return value? */
2317
2318         /* Store the decided rate index values away */
2319         bf->bf_state.bfs_pktlen = pktlen;
2320         bf->bf_state.bfs_hdrlen = hdrlen;
2321         bf->bf_state.bfs_atype = atype;
2322         bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2323             ieee80211_get_node_txpower(ni));
2324         bf->bf_state.bfs_txrate0 = txrate;
2325         bf->bf_state.bfs_try0 = try0;
2326         bf->bf_state.bfs_keyix = keyix;
2327         bf->bf_state.bfs_txantenna = txantenna;
2328         bf->bf_state.bfs_txflags = flags;
2329         bf->bf_state.bfs_shpream =
2330             !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2331
2332         /* Set local packet state, used to queue packets to hardware */
2333         bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2334         bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2335         bf->bf_state.bfs_pri = pri;
2336
2337         /* XXX this should be done in ath_tx_setrate() */
2338         bf->bf_state.bfs_ctsrate = 0;
2339         bf->bf_state.bfs_ctsduration = 0;
2340         bf->bf_state.bfs_ismrr = ismrr;
2341
2342         /* Blank the legacy rate array */
2343         bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2344
2345         bf->bf_state.bfs_rc[0].rix = rix;
2346         bf->bf_state.bfs_rc[0].tries = try0;
2347         bf->bf_state.bfs_rc[0].ratecode = txrate;
2348
2349         if (ismrr) {
2350                 int rix;
2351
2352                 rix = ath_tx_findrix(sc, params->ibp_rate1);
2353                 bf->bf_state.bfs_rc[1].rix = rix;
2354                 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2355
2356                 rix = ath_tx_findrix(sc, params->ibp_rate2);
2357                 bf->bf_state.bfs_rc[2].rix = rix;
2358                 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2359
2360                 rix = ath_tx_findrix(sc, params->ibp_rate3);
2361                 bf->bf_state.bfs_rc[3].rix = rix;
2362                 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2363         }
2364         /*
2365          * All the required rate control decisions have been made;
2366          * fill in the rc flags.
2367          */
2368         ath_tx_rate_fill_rcflags(sc, bf);
2369
2370         /* NB: no buffered multicast in power save support */
2371
2372         /*
2373          * If we're overriding the ADDBA destination, dump directly
2374          * into the hardware queue, right after any frames already
2375          * pending to that node.
2376          */
2377         DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2378             __func__, do_override);
2379
2380 #if 1
2381         /*
2382          * Put addba frames in the right place in the right TID/HWQ.
2383          */
2384         if (do_override) {
2385                 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2386                 /*
2387                  * XXX if it's addba frames, should we be leaking
2388                  * them out via the frame leak method?
2389                  * XXX for now let's not risk it; but we may wish
2390                  * to investigate this later.
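 *
 * ("Leaking" above refers to ath_tx_leak_count_update(), which
 * lets a single frame out to a dozing station; whether that is
 * safe for ADDBA frames hasn't been verified here.)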
2391 */ 2392 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2393 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2394 &queue_to_head)) { 2395 /* Queue to software queue */ 2396 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2397 } else { 2398 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2399 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2400 } 2401 #else 2402 /* Direct-dispatch to the hardware */ 2403 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2404 /* 2405 * Update the current leak count if 2406 * we're leaking frames; and set the 2407 * MORE flag as appropriate. 2408 */ 2409 ath_tx_leak_count_update(sc, tid, bf); 2410 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2411 #endif 2412 return 0; 2413 } 2414 2415 /* 2416 * Send a raw frame. 2417 * 2418 * This can be called by net80211. 2419 */ 2420 int 2421 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2422 const struct ieee80211_bpf_params *params) 2423 { 2424 struct ieee80211com *ic = ni->ni_ic; 2425 struct ath_softc *sc = ic->ic_softc; 2426 struct ath_buf *bf; 2427 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2428 int error = 0; 2429 2430 ATH_PCU_LOCK(sc); 2431 if (sc->sc_inreset_cnt > 0) { 2432 DPRINTF(sc, ATH_DEBUG_XMIT, 2433 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2434 error = EIO; 2435 ATH_PCU_UNLOCK(sc); 2436 goto badbad; 2437 } 2438 sc->sc_txstart_cnt++; 2439 ATH_PCU_UNLOCK(sc); 2440 2441 /* Wake the hardware up already */ 2442 ATH_LOCK(sc); 2443 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2444 ATH_UNLOCK(sc); 2445 2446 ATH_TX_LOCK(sc); 2447 2448 if (!sc->sc_running || sc->sc_invalid) { 2449 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", 2450 __func__, sc->sc_running, sc->sc_invalid); 2451 m_freem(m); 2452 error = ENETDOWN; 2453 goto bad; 2454 } 2455 2456 /* 2457 * Enforce how deep the multicast queue can grow. 2458 * 2459 * XXX duplicated in ath_tx_start(). 2460 */ 2461 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2462 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2463 > sc->sc_txq_mcastq_maxdepth) { 2464 sc->sc_stats.ast_tx_mcastq_overflow++; 2465 error = ENOBUFS; 2466 } 2467 2468 if (error != 0) { 2469 m_freem(m); 2470 goto bad; 2471 } 2472 } 2473 2474 /* 2475 * Grab a TX buffer and associated resources. 2476 */ 2477 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2478 if (bf == NULL) { 2479 sc->sc_stats.ast_tx_nobuf++; 2480 m_freem(m); 2481 error = ENOBUFS; 2482 goto bad; 2483 } 2484 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2485 m, params, bf); 2486 2487 if (params == NULL) { 2488 /* 2489 * Legacy path; interpret frame contents to decide 2490 * precisely how to send the frame. 2491 */ 2492 if (ath_tx_start(sc, ni, bf, m)) { 2493 error = EIO; /* XXX */ 2494 goto bad2; 2495 } 2496 } else { 2497 /* 2498 * Caller supplied explicit parameters to use in 2499 * sending the frame. 2500 */ 2501 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2502 error = EIO; /* XXX */ 2503 goto bad2; 2504 } 2505 } 2506 sc->sc_wd_timer = 5; 2507 sc->sc_stats.ast_tx_raw++; 2508 2509 /* 2510 * Update the TIM - if there's anything queued to the 2511 * software queue and power save is enabled, we should 2512 * set the TIM. 
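 *
 * (The TIM is the traffic indication map element carried in
 * beacons; a set bit tells a dozing station that frames are
 * buffered for it, so it knows to stay awake and fetch them.)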
2513 */ 2514 ath_tx_update_tim(sc, ni, 1); 2515 2516 ATH_TX_UNLOCK(sc); 2517 2518 ATH_PCU_LOCK(sc); 2519 sc->sc_txstart_cnt--; 2520 ATH_PCU_UNLOCK(sc); 2521 2522 2523 /* Put the hardware back to sleep if required */ 2524 ATH_LOCK(sc); 2525 ath_power_restore_power_state(sc); 2526 ATH_UNLOCK(sc); 2527 2528 return 0; 2529 2530 bad2: 2531 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2532 "bf=%p", 2533 m, 2534 params, 2535 bf); 2536 ATH_TXBUF_LOCK(sc); 2537 ath_returnbuf_head(sc, bf); 2538 ATH_TXBUF_UNLOCK(sc); 2539 2540 bad: 2541 ATH_TX_UNLOCK(sc); 2542 2543 ATH_PCU_LOCK(sc); 2544 sc->sc_txstart_cnt--; 2545 ATH_PCU_UNLOCK(sc); 2546 2547 /* Put the hardware back to sleep if required */ 2548 ATH_LOCK(sc); 2549 ath_power_restore_power_state(sc); 2550 ATH_UNLOCK(sc); 2551 2552 badbad: 2553 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2554 m, params); 2555 sc->sc_stats.ast_tx_raw_fail++; 2556 2557 return error; 2558 } 2559 2560 /* Some helper functions */ 2561 2562 /* 2563 * ADDBA (and potentially others) need to be placed in the same 2564 * hardware queue as the TID/node it's relating to. This is so 2565 * it goes out after any pending non-aggregate frames to the 2566 * same node/TID. 2567 * 2568 * If this isn't done, the ADDBA can go out before the frames 2569 * queued in hardware. Even though these frames have a sequence 2570 * number -earlier- than the ADDBA can be transmitted (but 2571 * no frames whose sequence numbers are after the ADDBA should 2572 * be!) they'll arrive after the ADDBA - and the receiving end 2573 * will simply drop them as being out of the BAW. 2574 * 2575 * The frames can't be appended to the TID software queue - it'll 2576 * never be sent out. So these frames have to be directly 2577 * dispatched to the hardware, rather than queued in software. 2578 * So if this function returns true, the TXQ has to be 2579 * overridden and it has to be directly dispatched. 2580 * 2581 * It's a dirty hack, but someone's gotta do it. 2582 */ 2583 2584 /* 2585 * XXX doesn't belong here! 2586 */ 2587 static int 2588 ieee80211_is_action(struct ieee80211_frame *wh) 2589 { 2590 /* Type: Management frame? */ 2591 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2592 IEEE80211_FC0_TYPE_MGT) 2593 return 0; 2594 2595 /* Subtype: Action frame? */ 2596 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2597 IEEE80211_FC0_SUBTYPE_ACTION) 2598 return 0; 2599 2600 return 1; 2601 } 2602 2603 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2604 /* 2605 * Return an alternate TID for ADDBA request frames. 2606 * 2607 * Yes, this likely should be done in the net80211 layer. 2608 */ 2609 static int 2610 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2611 struct ieee80211_node *ni, 2612 struct mbuf *m0, int *tid) 2613 { 2614 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2615 struct ieee80211_action_ba_addbarequest *ia; 2616 uint8_t *frm; 2617 uint16_t baparamset; 2618 2619 /* Not action frame? Bail */ 2620 if (! ieee80211_is_action(wh)) 2621 return 0; 2622 2623 /* XXX Not needed for frames we send? */ 2624 #if 0 2625 /* Correct length? */ 2626 if (! ieee80211_parse_action(ni, m)) 2627 return 0; 2628 #endif 2629 2630 /* Extract out action frame */ 2631 frm = (u_int8_t *)&wh[1]; 2632 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2633 2634 /* Not ADDBA? 
Bail */ 2635 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2636 return 0; 2637 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2638 return 0; 2639 2640 /* Extract TID, return it */ 2641 baparamset = le16toh(ia->rq_baparamset); 2642 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2643 2644 return 1; 2645 } 2646 #undef MS 2647 2648 /* Per-node software queue operations */ 2649 2650 /* 2651 * Add the current packet to the given BAW. 2652 * It is assumed that the current packet 2653 * 2654 * + fits inside the BAW; 2655 * + already has had a sequence number allocated. 2656 * 2657 * Since the BAW status may be modified by both the ath task and 2658 * the net80211/ifnet contexts, the TID must be locked. 2659 */ 2660 void 2661 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2662 struct ath_tid *tid, struct ath_buf *bf) 2663 { 2664 int index, cindex; 2665 struct ieee80211_tx_ampdu *tap; 2666 2667 ATH_TX_LOCK_ASSERT(sc); 2668 2669 if (bf->bf_state.bfs_isretried) 2670 return; 2671 2672 tap = ath_tx_get_tx_tid(an, tid->tid); 2673 2674 if (! bf->bf_state.bfs_dobaw) { 2675 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2676 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2677 __func__, SEQNO(bf->bf_state.bfs_seqno), 2678 tap->txa_start, tap->txa_wnd); 2679 } 2680 2681 if (bf->bf_state.bfs_addedbaw) 2682 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2683 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2684 "baw head=%d tail=%d\n", 2685 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2686 tap->txa_start, tap->txa_wnd, tid->baw_head, 2687 tid->baw_tail); 2688 2689 /* 2690 * Verify that the given sequence number is not outside of the 2691 * BAW. Complain loudly if that's the case. 2692 */ 2693 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2694 SEQNO(bf->bf_state.bfs_seqno))) { 2695 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2696 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2697 "baw head=%d tail=%d\n", 2698 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2699 tap->txa_start, tap->txa_wnd, tid->baw_head, 2700 tid->baw_tail); 2701 } 2702 2703 /* 2704 * ni->ni_txseqs[] is the currently allocated seqno. 2705 * the txa state contains the current baw start. 2706 */ 2707 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2708 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2709 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2710 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2711 "baw head=%d tail=%d\n", 2712 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2713 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2714 tid->baw_tail); 2715 2716 2717 #if 0 2718 assert(tid->tx_buf[cindex] == NULL); 2719 #endif 2720 if (tid->tx_buf[cindex] != NULL) { 2721 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2722 "%s: ba packet dup (index=%d, cindex=%d, " 2723 "head=%d, tail=%d)\n", 2724 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2725 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2726 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2727 __func__, 2728 tid->tx_buf[cindex], 2729 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2730 bf, 2731 SEQNO(bf->bf_state.bfs_seqno) 2732 ); 2733 } 2734 tid->tx_buf[cindex] = bf; 2735 2736 if (index >= ((tid->baw_tail - tid->baw_head) & 2737 (ATH_TID_MAX_BUFS - 1))) { 2738 tid->baw_tail = cindex; 2739 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2740 } 2741 } 2742 2743 /* 2744 * Flip the BAW buffer entry over from the existing one to the new one. 
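 *
 * As a worked example (illustrative numbers only): with
 * txa_start = 100, the old buffer's seqno = 103 and baw_head = 5,
 *
 *        index  = ATH_BA_INDEX(100, 103) = 3
 *        cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1) = 8
 *
 * so tid->tx_buf[8] is simply re-pointed at the clone; the BAW
 * itself (txa_start, baw_head) doesn't move.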
2745  *
2746  * When software retransmitting a (sub-)frame, it is entirely possible that
2747  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2748  * In that instance the buffer is cloned and the new buffer is used for
2749  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2750  * tracking array to maintain consistency.
2751  */
2752 static void
2753 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2754     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2755 {
2756         int index, cindex;
2757         struct ieee80211_tx_ampdu *tap;
2758         int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2759
2760         ATH_TX_LOCK_ASSERT(sc);
2761
2762         tap = ath_tx_get_tx_tid(an, tid->tid);
2763         index = ATH_BA_INDEX(tap->txa_start, seqno);
2764         cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2765
2766         /*
2767          * Just warn for now; if it happens then we should find out
2768          * about it. It's highly likely the aggregation session will
2769          * soon hang.
2770          */
2771         if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2772                 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2773                     "%s: retransmitted buffer"
2774                     " has mismatching seqnos; the BA session may hang.\n",
2775                     __func__);
2776                 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2777                     "%s: old seqno=%d, new_seqno=%d\n", __func__,
2778                     old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2779         }
2780
2781         if (tid->tx_buf[cindex] != old_bf) {
2782                 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2783                     "%s: ath_buf pointer incorrect; "
2784                     "the BA session may hang.\n", __func__);
2785                 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2786                     "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2787         }
2788
2789         tid->tx_buf[cindex] = new_bf;
2790 }
2791
2792 /*
2793  * seq_start - left edge of BAW
2794  * seq_next - current/next sequence number to allocate
2795  *
2796  * Since the BAW status may be modified by both the ath task and
2797  * the net80211/ifnet contexts, the TID must be locked.
2798  */
2799 static void
2800 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2801     struct ath_tid *tid, const struct ath_buf *bf)
2802 {
2803         int index, cindex;
2804         struct ieee80211_tx_ampdu *tap;
2805         int seqno = SEQNO(bf->bf_state.bfs_seqno);
2806
2807         ATH_TX_LOCK_ASSERT(sc);
2808
2809         tap = ath_tx_get_tx_tid(an, tid->tid);
2810         index = ATH_BA_INDEX(tap->txa_start, seqno);
2811         cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2812
2813         DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2814             "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2815             "baw head=%d, tail=%d\n",
2816             __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2817             cindex, tid->baw_head, tid->baw_tail);
2818
2819         /*
2820          * If this occurs then we have a big problem - something else
2821          * has slid tap->txa_start along without updating the BAW
2822          * tracking start/end pointers. Thus the TX BAW state is now
2823          * completely busted.
2824          *
2825          * But for now, since I haven't yet fixed TDMA and buffer cloning,
2826          * it's quite possible that a cloned buffer is making its way
2827          * here and causing it to fire off. Disable TDMA for now.
2828          */
2829         if (tid->tx_buf[cindex] != bf) {
2830                 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2831                     "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2832                     __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2833                     tid->tx_buf[cindex],
2834                     (tid->tx_buf[cindex] != NULL) ?
2835                     SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2836         }
2837
2838         tid->tx_buf[cindex] = NULL;
2839
2840         while (tid->baw_head != tid->baw_tail &&
2841             !tid->tx_buf[tid->baw_head]) {
2842                 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2843                 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2844         }
2845         DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2846             "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2847             __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2848 }
2849
2850 static void
2851 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2852     struct ath_buf *bf)
2853 {
2854         struct ieee80211_frame *wh;
2855
2856         ATH_TX_LOCK_ASSERT(sc);
2857
2858         if (tid->an->an_leak_count > 0) {
2859                 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2860
2861                 /*
2862                  * Update MORE based on the software/net80211 queue states.
2863                  */
2864                 if ((tid->an->an_stack_psq > 0)
2865                     || (tid->an->an_swq_depth > 0))
2866                         wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2867                 else
2868                         wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2869
2870                 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2871                     "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2872                     __func__,
2873                     tid->an->an_node.ni_macaddr,
2874                     ":",
2875                     tid->an->an_leak_count,
2876                     tid->an->an_stack_psq,
2877                     tid->an->an_swq_depth,
2878                     !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2879
2880                 /*
2881                  * Re-sync the underlying buffer.
2882                  */
2883                 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2884                     BUS_DMASYNC_PREWRITE);
2885
2886                 tid->an->an_leak_count--;
2887         }
2888 }
2889
2890 static int
2891 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2892 {
2893
2894         ATH_TX_LOCK_ASSERT(sc);
2895
2896         if (tid->an->an_leak_count > 0) {
2897                 return (1);
2898         }
2899         if (tid->paused)
2900                 return (0);
2901         return (1);
2902 }
2903
2904 /*
2905  * Mark the current node/TID as ready to TX.
2906  *
2907  * This is done to make it easy for the software scheduler to
2908  * find which nodes have data to send.
2909  *
2910  * The TXQ lock must be held.
2911  */
2912 void
2913 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2914 {
2915         struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2916
2917         ATH_TX_LOCK_ASSERT(sc);
2918
2919         /*
2920          * If we are leaking out a frame to this destination
2921          * for PS-POLL, ensure that we allow scheduling to
2922          * occur.
2923          */
2924         if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2925                 return;         /* paused, can't schedule yet */
2926
2927         if (tid->sched)
2928                 return;         /* already scheduled */
2929
2930         tid->sched = 1;
2931
2932 #if 0
2933         /*
2934          * If this is a sleeping node we're leaking to, give
2935          * it a higher priority. This is so bad for QoS it hurts.
2936          */
2937         if (tid->an->an_leak_count) {
2938                 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2939         } else {
2940                 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2941         }
2942 #endif
2943
2944         /*
2945          * We can't do the above - it'll confuse the TXQ software
2946          * scheduler which will keep checking the _head_ TID
2947          * in the list to see if it has traffic. If we queue
2948          * a TID to the head of the list and it doesn't transmit,
2949          * we'll check it again.
2950          *
2951          * So, get the rest of this frame leaking support working
2952          * reliably first and _then_ optimise it so leaked frames are
2953          * pushed out in front of any other pending software
2954          * queued nodes.
2955          */
2956         TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2957 }
2958
2959 /*
2960  * Mark the current node as no longer needing to be polled for
2961  * TX packets.
2962  *
2963  * The TXQ lock must be held.
2964 */ 2965 static void 2966 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2967 { 2968 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2969 2970 ATH_TX_LOCK_ASSERT(sc); 2971 2972 if (tid->sched == 0) 2973 return; 2974 2975 tid->sched = 0; 2976 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2977 } 2978 2979 /* 2980 * Assign a sequence number manually to the given frame. 2981 * 2982 * This should only be called for A-MPDU TX frames. 2983 * 2984 * Note: for group addressed frames, the sequence number 2985 * should be from NONQOS_TID, and net80211 should have 2986 * already assigned it for us. 2987 */ 2988 static ieee80211_seq 2989 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2990 struct ath_buf *bf, struct mbuf *m0) 2991 { 2992 struct ieee80211_frame *wh; 2993 int tid; 2994 ieee80211_seq seqno; 2995 uint8_t subtype; 2996 2997 wh = mtod(m0, struct ieee80211_frame *); 2998 tid = ieee80211_gettid(wh); 2999 3000 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n", 3001 __func__, tid, IEEE80211_QOS_HAS_SEQ(wh)); 3002 3003 /* XXX Is it a control frame? Ignore */ 3004 3005 /* Does the packet require a sequence number? */ 3006 if (! IEEE80211_QOS_HAS_SEQ(wh)) 3007 return -1; 3008 3009 ATH_TX_LOCK_ASSERT(sc); 3010 3011 /* 3012 * Is it a QOS NULL Data frame? Give it a sequence number from 3013 * the default TID (IEEE80211_NONQOS_TID.) 3014 * 3015 * The RX path of everything I've looked at doesn't include the NULL 3016 * data frame sequence number in the aggregation state updates, so 3017 * assigning it a sequence number there will cause a BAW hole on the 3018 * RX side. 3019 */ 3020 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3021 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 3022 /* XXX no locking for this TID? This is a bit of a problem. */ 3023 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3024 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3025 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3026 /* 3027 * group addressed frames get a sequence number from 3028 * a different sequence number space. 3029 */ 3030 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 3031 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 3032 } else { 3033 /* Manually assign sequence number */ 3034 seqno = ni->ni_txseqs[tid]; 3035 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 3036 } 3037 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 3038 M_SEQNO_SET(m0, seqno); 3039 3040 /* Return so caller can do something with it if needed */ 3041 DPRINTF(sc, ATH_DEBUG_SW_TX, 3042 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", 3043 __func__, subtype, tid, seqno); 3044 return seqno; 3045 } 3046 3047 /* 3048 * Attempt to direct dispatch an aggregate frame to hardware. 3049 * If the frame is out of BAW, queue. 3050 * Otherwise, schedule it as a single frame. 3051 */ 3052 static void 3053 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 3054 struct ath_txq *txq, struct ath_buf *bf) 3055 { 3056 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 3057 struct ieee80211_tx_ampdu *tap; 3058 3059 ATH_TX_LOCK_ASSERT(sc); 3060 3061 tap = ath_tx_get_tx_tid(an, tid->tid); 3062 3063 /* paused? queue */ 3064 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 3065 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3066 /* XXX don't sched - we're paused! */ 3067 return; 3068 } 3069 3070 /* outside baw? queue */ 3071 if (bf->bf_state.bfs_dobaw && 3072 (! 
BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3073             SEQNO(bf->bf_state.bfs_seqno)))) {
3074                 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3075                 ath_tx_tid_sched(sc, tid);
3076                 return;
3077         }
3078
3079         /*
3080          * This is a temporary check and should be removed once
3081          * all the relevant code paths have been fixed.
3082          *
3083          * During aggregate retries, it's possible that the head
3084          * frame will fail (which has the bfs_aggr and bfs_nframes
3085          * fields set for said aggregate) and will be retried as
3086          * a single frame. In this instance, the values should
3087          * be reset or the completion code will get upset with you.
3088          */
3089         if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3090                 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3091                     "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3092                     bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3093                 bf->bf_state.bfs_aggr = 0;
3094                 bf->bf_state.bfs_nframes = 1;
3095         }
3096
3097         /* Update CLRDMASK just before this frame is queued */
3098         ath_tx_update_clrdmask(sc, tid, bf);
3099
3100         /* Direct dispatch to hardware */
3101         ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3102             false);
3103         ath_tx_calc_duration(sc, bf);
3104         ath_tx_calc_protection(sc, bf);
3105         ath_tx_set_rtscts(sc, bf);
3106         ath_tx_rate_fill_rcflags(sc, bf);
3107         ath_tx_setds(sc, bf);
3108
3109         /* Statistics */
3110         sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3111
3112         /* Track per-TID hardware queue depth correctly */
3113         tid->hwq_depth++;
3114
3115         /* Add to BAW */
3116         if (bf->bf_state.bfs_dobaw) {
3117                 ath_tx_addto_baw(sc, an, tid, bf);
3118                 bf->bf_state.bfs_addedbaw = 1;
3119         }
3120
3121         /* Set completion handler, multi-frame aggregate or not */
3122         bf->bf_comp = ath_tx_aggr_comp;
3123
3124         /*
3125          * Update the current leak count if
3126          * we're leaking frames; and set the
3127          * MORE flag as appropriate.
3128          */
3129         ath_tx_leak_count_update(sc, tid, bf);
3130
3131         /* Hand off to hardware */
3132         ath_tx_handoff(sc, txq, bf);
3133 }
3134
3135 /*
3136  * Attempt to send the packet.
3137  * If the queue isn't busy, direct-dispatch.
3138  * If the queue is busy enough, queue the given packet on the
3139  * relevant software queue.
3140  */
3141 void
3142 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3143     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3144 {
3145         struct ath_node *an = ATH_NODE(ni);
3146         struct ieee80211_frame *wh;
3147         struct ath_tid *atid;
3148         int pri, tid;
3149         struct mbuf *m0 = bf->bf_m;
3150
3151         ATH_TX_LOCK_ASSERT(sc);
3152
3153         /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3154         wh = mtod(m0, struct ieee80211_frame *);
3155         pri = ath_tx_getac(sc, m0);
3156         tid = ath_tx_gettid(sc, m0);
3157         atid = &an->an_tid[tid];
3158
3159         DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3160             __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3161
3162         /* Set local packet state, used to queue packets to hardware */
3163         /* XXX potentially duplicate info, re-check */
3164         bf->bf_state.bfs_tid = tid;
3165         bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3166         bf->bf_state.bfs_pri = pri;
3167
3168         /*
3169          * If the hardware queue isn't busy, direct-dispatch it.
3170          * If the hardware queue is busy, software queue it.
3171          * If the TID is paused or the traffic is outside the BAW,
3172          * software queue it.
3173          *
3174          * If the node is in power-save and we're leaking a frame,
3175          * leak a single frame.
3176          */
3177         if (!
ath_tx_tid_can_tx_or_sched(sc, atid)) { 3178 /* TID is paused, queue */ 3179 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 3180 /* 3181 * If the caller requested that it be sent at a high 3182 * priority, queue it at the head of the list. 3183 */ 3184 if (queue_to_head) 3185 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3186 else 3187 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3188 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 3189 /* AMPDU pending; queue */ 3190 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 3191 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3192 /* XXX sched? */ 3193 } else if (ath_tx_ampdu_running(sc, an, tid)) { 3194 /* 3195 * AMPDU running, queue single-frame if the hardware queue 3196 * isn't busy. 3197 * 3198 * If the hardware queue is busy, sending an aggregate frame 3199 * then just hold off so we can queue more aggregate frames. 3200 * 3201 * Otherwise we may end up with single frames leaking through 3202 * because we are dispatching them too quickly. 3203 * 3204 * TODO: maybe we should treat this as two policies - minimise 3205 * latency, or maximise throughput. Then for BE/BK we can 3206 * maximise throughput, and VO/VI (if AMPDU is enabled!) 3207 * minimise latency. 3208 */ 3209 3210 /* 3211 * Always queue the frame to the tail of the list. 3212 */ 3213 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 3214 3215 /* 3216 * If the hardware queue isn't busy, direct dispatch 3217 * the head frame in the list. 3218 * 3219 * Note: if we're say, configured to do ADDBA but not A-MPDU 3220 * then maybe we want to still queue two non-aggregate frames 3221 * to the hardware. Again with the per-TID policy 3222 * configuration..) 3223 * 3224 * Otherwise, schedule the TID. 3225 */ 3226 /* XXX TXQ locking */ 3227 if (txq->axq_depth + txq->fifo.axq_depth == 0) { 3228 3229 bf = ATH_TID_FIRST(atid); 3230 ATH_TID_REMOVE(atid, bf, bf_list); 3231 3232 /* 3233 * Ensure it's definitely treated as a non-AMPDU 3234 * frame - this information may have been left 3235 * over from a previous attempt. 3236 */ 3237 bf->bf_state.bfs_aggr = 0; 3238 bf->bf_state.bfs_nframes = 1; 3239 3240 /* Queue to the hardware */ 3241 ath_tx_xmit_aggr(sc, an, txq, bf); 3242 DPRINTF(sc, ATH_DEBUG_SW_TX, 3243 "%s: xmit_aggr\n", 3244 __func__); 3245 } else { 3246 DPRINTF(sc, ATH_DEBUG_SW_TX, 3247 "%s: ampdu; swq'ing\n", 3248 __func__); 3249 3250 ath_tx_tid_sched(sc, atid); 3251 } 3252 /* 3253 * If we're not doing A-MPDU, be prepared to direct dispatch 3254 * up to both limits if possible. This particular corner 3255 * case may end up with packet starvation between aggregate 3256 * traffic and non-aggregate traffic: we want to ensure 3257 * that non-aggregate stations get a few frames queued to the 3258 * hardware before the aggregate station(s) get their chance. 3259 * 3260 * So if you only ever see a couple of frames direct dispatched 3261 * to the hardware from a non-AMPDU client, check both here 3262 * and in the software queue dispatcher to ensure that those 3263 * non-AMPDU stations get a fair chance to transmit. 
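 *
 * (The two limits tested below are separate knobs:
 * sc_hwq_limit_nonaggr bounds how many non-aggregate frames may
 * sit in the hardware queue before we software-queue instead,
 * and sc_hwq_limit_aggr does the same for queued aggregates.)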
3264  */
3265         /* XXX TXQ locking */
3266         } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3267             (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3268                 /* AMPDU not running, attempt direct dispatch */
3269                 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3270                 /* See if clrdmask needs to be set */
3271                 ath_tx_update_clrdmask(sc, atid, bf);
3272
3273                 /*
3274                  * Update the current leak count if
3275                  * we're leaking frames; and set the
3276                  * MORE flag as appropriate.
3277                  */
3278                 ath_tx_leak_count_update(sc, atid, bf);
3279
3280                 /*
3281                  * Dispatch the frame.
3282                  */
3283                 ath_tx_xmit_normal(sc, txq, bf);
3284         } else {
3285                 /* Busy; queue */
3286                 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3287                 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3288                 ath_tx_tid_sched(sc, atid);
3289         }
3290 }
3291
3292 /*
3293  * Only set the clrdmask bit if none of the TIDs are currently
3294  * filtered.
3295  *
3296  * XXX TODO: go through all the callers and check to see
3297  * which are being called in the context of looping over all
3298  * TIDs (eg, if all tids are being paused, resumed, etc.)
3299  * That'll avoid O(n^2) complexity here.
3300  */
3301 static void
3302 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3303 {
3304         int i;
3305
3306         ATH_TX_LOCK_ASSERT(sc);
3307
3308         for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3309                 if (an->an_tid[i].isfiltered == 1)
3310                         return;
3311         }
3312         an->clrdmask = 1;
3313 }
3314
3315 /*
3316  * Configure the per-TID node state.
3317  *
3318  * This likely belongs in if_ath_node.c but I can't think of anywhere
3319  * else to put it just yet.
3320  *
3321  * This sets up the SLISTs and the mutex as appropriate.
3322  */
3323 void
3324 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3325 {
3326         int i, j;
3327         struct ath_tid *atid;
3328
3329         for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3330                 atid = &an->an_tid[i];
3331
3332                 /* XXX now with this bzero(), is the field 0'ing needed? */
3333                 bzero(atid, sizeof(*atid));
3334
3335                 TAILQ_INIT(&atid->tid_q);
3336                 TAILQ_INIT(&atid->filtq.tid_q);
3337                 atid->tid = i;
3338                 atid->an = an;
3339                 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3340                         atid->tx_buf[j] = NULL;
3341                 atid->baw_head = atid->baw_tail = 0;
3342                 atid->paused = 0;
3343                 atid->sched = 0;
3344                 atid->hwq_depth = 0;
3345                 atid->cleanup_inprogress = 0;
3346                 if (i == IEEE80211_NONQOS_TID)
3347                         atid->ac = ATH_NONQOS_TID_AC;
3348                 else
3349                         atid->ac = TID_TO_WME_AC(i);
3350         }
3351         an->clrdmask = 1;       /* Always start by setting this bit */
3352 }
3353
3354 /*
3355  * Pause the current TID. This stops packets from being transmitted
3356  * on it.
3357  *
3358  * Since this is called from upper layers as well as the driver,
3359  * it will get the TID lock.
3360  */
3361 static void
3362 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3363 {
3364
3365         ATH_TX_LOCK_ASSERT(sc);
3366         tid->paused++;
3367         DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3368             __func__,
3369             tid->an->an_node.ni_macaddr, ":",
3370             tid->tid,
3371             tid->paused);
3372 }
3373
3374 /*
3375  * Unpause the current TID, and schedule it if needed.
3376  */
3377 static void
3378 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3379 {
3380         ATH_TX_LOCK_ASSERT(sc);
3381
3382         /*
3383          * There are some odd places where ath_tx_tid_resume() is called
3384          * when it shouldn't be; this works around that particular issue
3385          * until it's actually resolved.
3386 */ 3387 if (tid->paused == 0) { 3388 device_printf(sc->sc_dev, 3389 "%s: [%6D]: tid=%d, paused=0?\n", 3390 __func__, 3391 tid->an->an_node.ni_macaddr, ":", 3392 tid->tid); 3393 } else { 3394 tid->paused--; 3395 } 3396 3397 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3398 "%s: [%6D]: tid=%d, unpaused = %d\n", 3399 __func__, 3400 tid->an->an_node.ni_macaddr, ":", 3401 tid->tid, 3402 tid->paused); 3403 3404 if (tid->paused) 3405 return; 3406 3407 /* 3408 * Override the clrdmask configuration for the next frame 3409 * from this TID, just to get the ball rolling. 3410 */ 3411 ath_tx_set_clrdmask(sc, tid->an); 3412 3413 if (tid->axq_depth == 0) 3414 return; 3415 3416 /* XXX isfiltered shouldn't ever be 0 at this point */ 3417 if (tid->isfiltered == 1) { 3418 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3419 __func__); 3420 return; 3421 } 3422 3423 ath_tx_tid_sched(sc, tid); 3424 3425 /* 3426 * Queue the software TX scheduler. 3427 */ 3428 ath_tx_swq_kick(sc); 3429 } 3430 3431 /* 3432 * Add the given ath_buf to the TID filtered frame list. 3433 * This requires the TID be filtered. 3434 */ 3435 static void 3436 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3437 struct ath_buf *bf) 3438 { 3439 3440 ATH_TX_LOCK_ASSERT(sc); 3441 3442 if (!tid->isfiltered) 3443 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3444 __func__); 3445 3446 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3447 3448 /* Set the retry bit and bump the retry counter */ 3449 ath_tx_set_retry(sc, bf); 3450 sc->sc_stats.ast_tx_swfiltered++; 3451 3452 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3453 } 3454 3455 /* 3456 * Handle a completed filtered frame from the given TID. 3457 * This just enables/pauses the filtered frame state if required 3458 * and appends the filtered frame to the filtered queue. 3459 */ 3460 static void 3461 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3462 struct ath_buf *bf) 3463 { 3464 3465 ATH_TX_LOCK_ASSERT(sc); 3466 3467 if (! tid->isfiltered) { 3468 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", 3469 __func__, tid->tid); 3470 tid->isfiltered = 1; 3471 ath_tx_tid_pause(sc, tid); 3472 } 3473 3474 /* Add the frame to the filter queue */ 3475 ath_tx_tid_filt_addbuf(sc, tid, bf); 3476 } 3477 3478 /* 3479 * Complete the filtered frame TX completion. 3480 * 3481 * If there are no more frames in the hardware queue, unpause/unfilter 3482 * the TID if applicable. Otherwise we will wait for a node PS transition 3483 * to unfilter. 3484 */ 3485 static void 3486 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3487 { 3488 struct ath_buf *bf; 3489 int do_resume = 0; 3490 3491 ATH_TX_LOCK_ASSERT(sc); 3492 3493 if (tid->hwq_depth != 0) 3494 return; 3495 3496 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", 3497 __func__, tid->tid); 3498 if (tid->isfiltered == 1) { 3499 tid->isfiltered = 0; 3500 do_resume = 1; 3501 } 3502 3503 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3504 ath_tx_set_clrdmask(sc, tid->an); 3505 3506 /* XXX this is really quite inefficient */ 3507 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3508 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3509 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3510 } 3511 3512 /* And only resume if we had paused before */ 3513 if (do_resume) 3514 ath_tx_tid_resume(sc, tid); 3515 } 3516 3517 /* 3518 * Called when a single (aggregate or otherwise) frame is completed. 
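 * This is the filtered-frames completion path: the hardware set
 * the "filtered" TX status for the frame (typically because the
 * destination went to sleep or stopped ACKing) instead of
 * transmitting it.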
3519  *
3520  * Returns 0 if the buffer could be added to the filtered list
3521  * (cloned or otherwise), 1 if the buffer couldn't be added to the
3522  * filtered list (failed clone; expired retry) and the caller should
3523  * free it and handle it like a failure (eg by sending a BAR.)
3524  *
3525  * Since the buffer may be cloned, bf must not be touched after this
3526  * call if the return value is 0.
3527  */
3528 static int
3529 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3530     struct ath_buf *bf)
3531 {
3532         struct ath_buf *nbf;
3533         int retval;
3534
3535         ATH_TX_LOCK_ASSERT(sc);
3536
3537         /*
3538          * Don't allow a filtered frame to live forever.
3539          */
3540         if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3541                 sc->sc_stats.ast_tx_swretrymax++;
3542                 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3543                     "%s: bf=%p, seqno=%d, exceeded retries\n",
3544                     __func__,
3545                     bf,
3546                     SEQNO(bf->bf_state.bfs_seqno));
3547                 retval = 1;     /* error */
3548                 goto finish;
3549         }
3550
3551         /*
3552          * A busy buffer can't be added to the retry list.
3553          * It needs to be cloned.
3554          */
3555         if (bf->bf_flags & ATH_BUF_BUSY) {
3556                 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3557                 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3558                     "%s: busy buffer clone: %p -> %p\n",
3559                     __func__, bf, nbf);
3560         } else {
3561                 nbf = bf;
3562         }
3563
3564         if (nbf == NULL) {
3565                 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3566                     "%s: busy buffer couldn't be cloned (%p)!\n",
3567                     __func__, bf);
3568                 retval = 1;     /* error */
3569         } else {
3570                 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3571                 retval = 0;     /* ok */
3572         }
3573 finish:
3574         ath_tx_tid_filt_comp_complete(sc, tid);
3575
3576         return (retval);
3577 }
3578
3579 static void
3580 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3581     struct ath_buf *bf_first, ath_bufhead *bf_q)
3582 {
3583         struct ath_buf *bf, *bf_next, *nbf;
3584
3585         ATH_TX_LOCK_ASSERT(sc);
3586
3587         bf = bf_first;
3588         while (bf) {
3589                 bf_next = bf->bf_next;
3590                 bf->bf_next = NULL;     /* Remove it from the aggr list */
3591
3592                 /*
3593                  * Don't allow a filtered frame to live forever.
3594                  */
3595                 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3596                         sc->sc_stats.ast_tx_swretrymax++;
3597                         DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3598                             "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3599                             __func__,
3600                             tid->tid,
3601                             bf,
3602                             SEQNO(bf->bf_state.bfs_seqno));
3603                         TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3604                         goto next;
3605                 }
3606
3607                 if (bf->bf_flags & ATH_BUF_BUSY) {
3608                         nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3609                         DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3610                             "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3611                             __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3612                 } else {
3613                         nbf = bf;
3614                 }
3615
3616                 /*
3617                  * If the buffer couldn't be cloned, add it to bf_q;
3618                  * the caller will free the buffer(s) as required.
3619                  */
3620                 if (nbf == NULL) {
3621                         DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3622                             "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3623                             __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3624                         TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3625                 } else {
3626                         ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3627                 }
3628 next:
3629                 bf = bf_next;
3630         }
3631
3632         ath_tx_tid_filt_comp_complete(sc, tid);
3633 }
3634
3635 /*
3636  * Suspend the queue because we need to TX a BAR.
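 *
 * The TID stays paused (bar_wait is set) until the BAR either
 * TXes successfully or exhausts its retries, at which point
 * ath_tx_tid_bar_unsuspend() resumes normal traffic.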
3637 */ 3638 static void 3639 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3640 { 3641 3642 ATH_TX_LOCK_ASSERT(sc); 3643 3644 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3645 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3646 __func__, 3647 tid->tid, 3648 tid->bar_wait, 3649 tid->bar_tx); 3650 3651 /* We shouldn't be called when bar_tx is 1 */ 3652 if (tid->bar_tx) { 3653 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3654 "%s: bar_tx is 1?!\n", __func__); 3655 } 3656 3657 /* If we've already been called, just be patient. */ 3658 if (tid->bar_wait) 3659 return; 3660 3661 /* Wait! */ 3662 tid->bar_wait = 1; 3663 3664 /* Only one pause, no matter how many frames fail */ 3665 ath_tx_tid_pause(sc, tid); 3666 } 3667 3668 /* 3669 * We've finished with BAR handling - either we succeeded or 3670 * failed. Either way, unsuspend TX. 3671 */ 3672 static void 3673 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3674 { 3675 3676 ATH_TX_LOCK_ASSERT(sc); 3677 3678 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3679 "%s: %6D: TID=%d, called\n", 3680 __func__, 3681 tid->an->an_node.ni_macaddr, 3682 ":", 3683 tid->tid); 3684 3685 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3686 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3687 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3688 __func__, tid->an->an_node.ni_macaddr, ":", 3689 tid->tid, tid->bar_tx, tid->bar_wait); 3690 } 3691 3692 tid->bar_tx = tid->bar_wait = 0; 3693 ath_tx_tid_resume(sc, tid); 3694 } 3695 3696 /* 3697 * Return whether we're ready to TX a BAR frame. 3698 * 3699 * Requires the TID lock be held. 3700 */ 3701 static int 3702 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3703 { 3704 3705 ATH_TX_LOCK_ASSERT(sc); 3706 3707 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3708 return (0); 3709 3710 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3711 "%s: %6D: TID=%d, bar ready\n", 3712 __func__, 3713 tid->an->an_node.ni_macaddr, 3714 ":", 3715 tid->tid); 3716 3717 return (1); 3718 } 3719 3720 /* 3721 * Check whether the current TID is ready to have a BAR 3722 * TXed and if so, do the TX. 3723 * 3724 * Since the TID/TXQ lock can't be held during a call to 3725 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3726 * sending the BAR and locking it again. 3727 * 3728 * Eventually, the code to send the BAR should be broken out 3729 * from this routine so the lock doesn't have to be reacquired 3730 * just to be immediately dropped by the caller. 3731 */ 3732 static void 3733 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3734 { 3735 struct ieee80211_tx_ampdu *tap; 3736 3737 ATH_TX_LOCK_ASSERT(sc); 3738 3739 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3740 "%s: %6D: TID=%d, called\n", 3741 __func__, 3742 tid->an->an_node.ni_macaddr, 3743 ":", 3744 tid->tid); 3745 3746 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3747 3748 /* 3749 * This is an error condition! 
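 *
 * bar_wait=0 means nobody asked for a BAR, and bar_tx=1 means one
 * is already in flight; in either case TXing a(nother) BAR here
 * would confuse the BAR state machine.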
3750 */ 3751 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3752 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3753 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3754 __func__, tid->an->an_node.ni_macaddr, ":", 3755 tid->tid, tid->bar_tx, tid->bar_wait); 3756 return; 3757 } 3758 3759 /* Don't do anything if we still have pending frames */ 3760 if (tid->hwq_depth > 0) { 3761 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3762 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3763 __func__, 3764 tid->an->an_node.ni_macaddr, 3765 ":", 3766 tid->tid, 3767 tid->hwq_depth); 3768 return; 3769 } 3770 3771 /* We're now about to TX */ 3772 tid->bar_tx = 1; 3773 3774 /* 3775 * Override the clrdmask configuration for the next frame, 3776 * just to get the ball rolling. 3777 */ 3778 ath_tx_set_clrdmask(sc, tid->an); 3779 3780 /* 3781 * Calculate new BAW left edge, now that all frames have either 3782 * succeeded or failed. 3783 * 3784 * XXX verify this is _actually_ the valid value to begin at! 3785 */ 3786 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3787 "%s: %6D: TID=%d, new BAW left edge=%d\n", 3788 __func__, 3789 tid->an->an_node.ni_macaddr, 3790 ":", 3791 tid->tid, 3792 tap->txa_start); 3793 3794 /* Try sending the BAR frame */ 3795 /* We can't hold the lock here! */ 3796 3797 ATH_TX_UNLOCK(sc); 3798 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3799 /* Success? Now we wait for notification that it's done */ 3800 ATH_TX_LOCK(sc); 3801 return; 3802 } 3803 3804 /* Failure? For now, warn loudly and continue */ 3805 ATH_TX_LOCK(sc); 3806 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3807 "%s: %6D: TID=%d, failed to TX BAR, continue!\n", 3808 __func__, tid->an->an_node.ni_macaddr, ":", 3809 tid->tid); 3810 ath_tx_tid_bar_unsuspend(sc, tid); 3811 } 3812 3813 static void 3814 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3815 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3816 { 3817 3818 ATH_TX_LOCK_ASSERT(sc); 3819 3820 /* 3821 * If the current TID is running AMPDU, update 3822 * the BAW. 3823 */ 3824 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3825 bf->bf_state.bfs_dobaw) { 3826 /* 3827 * Only remove the frame from the BAW if it's 3828 * been transmitted at least once; this means 3829 * the frame was in the BAW to begin with. 3830 */ 3831 if (bf->bf_state.bfs_retries > 0) { 3832 ath_tx_update_baw(sc, an, tid, bf); 3833 bf->bf_state.bfs_dobaw = 0; 3834 } 3835 #if 0 3836 /* 3837 * This has become a non-fatal error now 3838 */ 3839 if (! 
bf->bf_state.bfs_addedbaw) 3840 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3841 "%s: wasn't added: seqno %d\n", 3842 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3843 #endif 3844 } 3845 3846 /* Strip it out of an aggregate list if it was in one */ 3847 bf->bf_next = NULL; 3848 3849 /* Insert on the free queue to be freed by the caller */ 3850 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3851 } 3852 3853 static void 3854 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3855 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3856 { 3857 struct ieee80211_node *ni = &an->an_node; 3858 struct ath_txq *txq; 3859 struct ieee80211_tx_ampdu *tap; 3860 3861 txq = sc->sc_ac2q[tid->ac]; 3862 tap = ath_tx_get_tx_tid(an, tid->tid); 3863 3864 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3865 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " 3866 "seqno=%d, retry=%d\n", 3867 __func__, 3868 pfx, 3869 ni->ni_macaddr, 3870 ":", 3871 bf, 3872 bf->bf_state.bfs_addedbaw, 3873 bf->bf_state.bfs_dobaw, 3874 SEQNO(bf->bf_state.bfs_seqno), 3875 bf->bf_state.bfs_retries); 3876 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3877 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3878 __func__, 3879 pfx, 3880 ni->ni_macaddr, 3881 ":", 3882 bf, 3883 txq->axq_qnum, 3884 txq->axq_depth, 3885 txq->axq_aggr_depth); 3886 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3887 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " 3888 "isfiltered=%d\n", 3889 __func__, 3890 pfx, 3891 ni->ni_macaddr, 3892 ":", 3893 bf, 3894 tid->axq_depth, 3895 tid->hwq_depth, 3896 tid->bar_wait, 3897 tid->isfiltered); 3898 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, 3899 "%s: %s: %6D: tid %d: " 3900 "sched=%d, paused=%d, " 3901 "incomp=%d, baw_head=%d, " 3902 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3903 __func__, 3904 pfx, 3905 ni->ni_macaddr, 3906 ":", 3907 tid->tid, 3908 tid->sched, tid->paused, 3909 tid->incomp, tid->baw_head, 3910 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3911 ni->ni_txseqs[tid->tid]); 3912 3913 /* XXX Dump the frame, see what it is? */ 3914 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3915 ieee80211_dump_pkt(ni->ni_ic, 3916 mtod(bf->bf_m, const uint8_t *), 3917 bf->bf_m->m_len, 0, -1); 3918 } 3919 3920 /* 3921 * Free any packets currently pending in the software TX queue. 3922 * 3923 * This will be called when a node is being deleted. 3924 * 3925 * It can also be called on an active node during an interface 3926 * reset or state transition. 3927 * 3928 * (From Linux/reference): 3929 * 3930 * TODO: For frame(s) that are in the retry state, we will reuse the 3931 * sequence number(s) without setting the retry bit. The 3932 * alternative is to give up on these and BAR the receiver's window 3933 * forward.
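 *
 * That is: either reuse the sequence numbers and hope the receiver
 * copes with seeing them again, or give up on them and slide
 * tap->txa_start forward with a BAR so the BAW stays consistent.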
*/ 3935 static void 3936 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3937 struct ath_tid *tid, ath_bufhead *bf_cq) 3938 { 3939 struct ath_buf *bf; 3940 struct ieee80211_tx_ampdu *tap; 3941 struct ieee80211_node *ni = &an->an_node; 3942 int t; 3943 3944 tap = ath_tx_get_tx_tid(an, tid->tid); 3945 3946 ATH_TX_LOCK_ASSERT(sc); 3947 3948 /* Walk the queue, free frames */ 3949 t = 0; 3950 for (;;) { 3951 bf = ATH_TID_FIRST(tid); 3952 if (bf == NULL) { 3953 break; 3954 } 3955 3956 if (t == 0) { 3957 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3958 // t = 1; 3959 } 3960 3961 ATH_TID_REMOVE(tid, bf, bf_list); 3962 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3963 } 3964 3965 /* And now, drain the filtered frame queue */ 3966 t = 0; 3967 for (;;) { 3968 bf = ATH_TID_FILT_FIRST(tid); 3969 if (bf == NULL) 3970 break; 3971 3972 if (t == 0) { 3973 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3974 // t = 1; 3975 } 3976 3977 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3978 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3979 } 3980 3981 /* 3982 * Override the clrdmask configuration for the next frame 3983 * in case there is some future transmission, just to get 3984 * the ball rolling. 3985 * 3986 * This won't hurt things if the TID is about to be freed. 3987 */ 3988 ath_tx_set_clrdmask(sc, tid->an); 3989 3990 /* 3991 * Now that it's completed, grab the TID lock and update 3992 * the sequence number and BAW window. 3993 * Because sequence numbers have been assigned to frames 3994 * that haven't been sent yet, it's entirely possible 3995 * we'll be called with some pending frames that have not 3996 * been transmitted. 3997 * 3998 * The cleaner solution is to do the sequence number allocation 3999 * when the packet is first transmitted - and thus the "retries" 4000 * check above would be enough to update the BAW/seqno. 4001 */ 4002 4003 /* But don't do it for non-QoS TIDs */ 4004 if (tap) { 4005 #if 1 4006 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4007 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", 4008 __func__, 4009 ni->ni_macaddr, 4010 ":", 4011 an, 4012 tid->tid, 4013 tap->txa_start); 4014 #endif 4015 ni->ni_txseqs[tid->tid] = tap->txa_start; 4016 tid->baw_tail = tid->baw_head; 4017 } 4018 } 4019 4020 /* 4021 * Reset the TID state. This must only be called once the node has 4022 * had its frames flushed from this TID, to ensure that no other 4023 * pause / unpause logic can kick in. 4024 */ 4025 static void 4026 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) 4027 { 4028 4029 #if 0 4030 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; 4031 tid->paused = tid->sched = tid->addba_tx_pending = 0; 4032 tid->incomp = tid->cleanup_inprogress = 0; 4033 #endif 4034 4035 /* 4036 * If we have a bar_wait set, we need to unpause the TID 4037 * here. Otherwise once cleanup has finished, the TID won't 4038 * have the right paused counter. 4039 * 4040 * XXX I'm not going through resume here - I don't want the 4041 * node to be rescheduled just yet. This however should be 4042 * methodized! 4043 */ 4044 if (tid->bar_wait) { 4045 if (tid->paused > 0) { 4046 tid->paused --; 4047 } 4048 } 4049 4050 /* 4051 * XXX same with a currently filtered TID. 4052 * 4053 * Since this is being called during a flush, we assume that 4054 * the filtered frame list is actually empty. 4055 * 4056 * XXX TODO: add in a check to ensure that the filtered queue 4057 * depth is actually 0!
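 *
 * A sketch of such a check (hypothetical, assuming the filtered
 * queue macros used elsewhere in this file):
 *
 *	KASSERT(ATH_TID_FILT_FIRST(tid) == NULL,
 *	    ("%s: tid %d: filtered queue not empty", __func__, tid->tid));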
4058 */ 4059 if (tid->isfiltered) { 4060 if (tid->paused > 0) { 4061 tid->paused --; 4062 } 4063 } 4064 4065 /* 4066 * Clear BAR, filtered frames, scheduled and ADDBA pending. 4067 * The TID may be going through cleanup from the last association 4068 * where things in the BAW are still in the hardware queue. 4069 */ 4070 tid->bar_wait = 0; 4071 tid->bar_tx = 0; 4072 tid->isfiltered = 0; 4073 tid->sched = 0; 4074 tid->addba_tx_pending = 0; 4075 4076 /* 4077 * XXX TODO: it may just be enough to walk the HWQs and mark 4078 * frames for that node as non-aggregate; or mark the ath_node 4079 * with something that indicates that aggregation is no longer 4080 * occurring. Then we can just toss the BAW complaints and 4081 * do a complete hard reset of state here - no pause, no 4082 * complete counter, etc. 4083 */ 4084 4085 } 4086 4087 /* 4088 * Flush all software queued packets for the given node. 4089 * 4090 * This occurs when a completion handler frees the last buffer 4091 * for a node, and the node is thus freed. This causes the node 4092 * to be cleaned up, which ends up calling ath_tx_node_flush. 4093 */ 4094 void 4095 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 4096 { 4097 int tid; 4098 ath_bufhead bf_cq; 4099 struct ath_buf *bf; 4100 4101 TAILQ_INIT(&bf_cq); 4102 4103 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 4104 &an->an_node); 4105 4106 ATH_TX_LOCK(sc); 4107 DPRINTF(sc, ATH_DEBUG_NODE, 4108 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 4109 "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 4110 __func__, 4111 an->an_node.ni_macaddr, 4112 ":", 4113 an->an_is_powersave, 4114 an->an_stack_psq, 4115 an->an_tim_set, 4116 an->an_swq_depth, 4117 an->clrdmask, 4118 an->an_leak_count); 4119 4120 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 4121 struct ath_tid *atid = &an->an_tid[tid]; 4122 4123 /* Free packets */ 4124 ath_tx_tid_drain(sc, an, atid, &bf_cq); 4125 4126 /* Remove this tid from the list of active tids */ 4127 ath_tx_tid_unsched(sc, atid); 4128 4129 /* Reset the per-TID pause, BAR, etc state */ 4130 ath_tx_tid_reset(sc, atid); 4131 } 4132 4133 /* 4134 * Clear global leak count 4135 */ 4136 an->an_leak_count = 0; 4137 ATH_TX_UNLOCK(sc); 4138 4139 /* Handle completed frames */ 4140 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4141 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4142 ath_tx_default_comp(sc, bf, 0); 4143 } 4144 } 4145 4146 /* 4147 * Drain all the software TXQs currently with traffic queued. 4148 */ 4149 void 4150 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 4151 { 4152 struct ath_tid *tid; 4153 ath_bufhead bf_cq; 4154 struct ath_buf *bf; 4155 4156 TAILQ_INIT(&bf_cq); 4157 ATH_TX_LOCK(sc); 4158 4159 /* 4160 * Iterate over all active tids for the given txq, 4161 * flushing and unsched'ing them 4162 */ 4163 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 4164 tid = TAILQ_FIRST(&txq->axq_tidq); 4165 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4166 ath_tx_tid_unsched(sc, tid); 4167 } 4168 4169 ATH_TX_UNLOCK(sc); 4170 4171 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4172 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4173 ath_tx_default_comp(sc, bf, 0); 4174 } 4175 } 4176 4177 /* 4178 * Handle completion of non-aggregate session frames. 4179 * 4180 * This (currently) doesn't implement software retransmission of 4181 * non-aggregate frames! 4182 * 4183 * Software retransmission of non-aggregate frames needs to obey 4184 * the strict sequence number ordering, and drop any frames that 4185 * will fail this. 
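 * (Retransmitting out of order would leave the receiver's
 * sequence space inconsistent, so such frames have to be
 * dropped rather than retried.)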
4186 * 4187 * For now, filtered frames and frame transmission will cause 4188 * all kinds of issues. So we don't support them. 4189 * 4190 * So anyone queuing frames via ath_tx_normal_xmit() or 4191 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 4192 */ 4193 void 4194 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4195 { 4196 struct ieee80211_node *ni = bf->bf_node; 4197 struct ath_node *an = ATH_NODE(ni); 4198 int tid = bf->bf_state.bfs_tid; 4199 struct ath_tid *atid = &an->an_tid[tid]; 4200 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4201 4202 /* The TID state is protected behind the TXQ lock */ 4203 ATH_TX_LOCK(sc); 4204 4205 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 4206 __func__, bf, fail, atid->hwq_depth - 1); 4207 4208 atid->hwq_depth--; 4209 4210 #if 0 4211 /* 4212 * If the frame was filtered, stick it on the filter frame 4213 * queue and complain about it. It shouldn't happen! 4214 */ 4215 if ((ts->ts_status & HAL_TXERR_FILT) || 4216 (ts->ts_status != 0 && atid->isfiltered)) { 4217 DPRINTF(sc, ATH_DEBUG_SW_TX, 4218 "%s: isfiltered=%d, ts_status=%d: huh?\n", 4219 __func__, 4220 atid->isfiltered, 4221 ts->ts_status); 4222 ath_tx_tid_filt_comp_buf(sc, atid, bf); 4223 } 4224 #endif 4225 if (atid->isfiltered) 4226 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); 4227 if (atid->hwq_depth < 0) 4228 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 4229 __func__, atid->hwq_depth); 4230 4231 /* If the TID is being cleaned up, track things */ 4232 /* XXX refactor! */ 4233 if (atid->cleanup_inprogress) { 4234 atid->incomp--; 4235 if (atid->incomp == 0) { 4236 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4237 "%s: TID %d: cleaned up! resume!\n", 4238 __func__, tid); 4239 atid->cleanup_inprogress = 0; 4240 ath_tx_tid_resume(sc, atid); 4241 } 4242 } 4243 4244 /* 4245 * If the queue is filtered, potentially mark it as complete 4246 * and reschedule it as needed. 4247 * 4248 * This is required as there may be a subsequent TX descriptor 4249 * for this end-node that has CLRDMASK set, so it's quite possible 4250 * that a filtered frame will be followed by a non-filtered 4251 * (complete or otherwise) frame. 4252 * 4253 * XXX should we do this before we complete the frame? 4254 */ 4255 if (atid->isfiltered) 4256 ath_tx_tid_filt_comp_complete(sc, atid); 4257 ATH_TX_UNLOCK(sc); 4258 4259 /* 4260 * punt to rate control if we're not being cleaned up 4261 * during a hw queue drain and the frame wanted an ACK. 4262 */ 4263 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 4264 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 4265 ts, 4266 bf->bf_state.bfs_pktlen, 4267 bf->bf_state.bfs_pktlen, 4268 1, (ts->ts_status == 0) ? 0 : 1); 4269 4270 ath_tx_default_comp(sc, bf, fail); 4271 } 4272 4273 /* 4274 * Handle cleanup of aggregate session packets that aren't 4275 * an A-MPDU. 4276 * 4277 * There's no need to update the BAW here - the session is being 4278 * torn down. 4279 */ 4280 static void 4281 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4282 { 4283 struct ieee80211_node *ni = bf->bf_node; 4284 struct ath_node *an = ATH_NODE(ni); 4285 int tid = bf->bf_state.bfs_tid; 4286 struct ath_tid *atid = &an->an_tid[tid]; 4287 4288 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 4289 __func__, tid, atid->incomp); 4290 4291 ATH_TX_LOCK(sc); 4292 atid->incomp--; 4293 4294 /* XXX refactor! 
*/ 4295 if (bf->bf_state.bfs_dobaw) { 4296 ath_tx_update_baw(sc, an, atid, bf); 4297 if (!bf->bf_state.bfs_addedbaw) 4298 DPRINTF(sc, ATH_DEBUG_SW_TX, 4299 "%s: wasn't added: seqno %d\n", 4300 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4301 } 4302 4303 if (atid->incomp == 0) { 4304 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4305 "%s: TID %d: cleaned up! resume!\n", 4306 __func__, tid); 4307 atid->cleanup_inprogress = 0; 4308 ath_tx_tid_resume(sc, atid); 4309 } 4310 ATH_TX_UNLOCK(sc); 4311 4312 ath_tx_default_comp(sc, bf, 0); 4313 } 4314 4315 4316 /* 4317 * As it currently stands, this is a bit dumb. Ideally we'd just 4318 * fail the frame the normal way and have it permanently fail 4319 * via the normal aggregate completion path. 4320 */ 4321 static void 4322 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, 4323 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) 4324 { 4325 struct ath_tid *atid = &an->an_tid[tid]; 4326 struct ath_buf *bf, *bf_next; 4327 4328 ATH_TX_LOCK_ASSERT(sc); 4329 4330 /* 4331 * Remove this frame from the queue. 4332 */ 4333 ATH_TID_REMOVE(atid, bf_head, bf_list); 4334 4335 /* 4336 * Loop over all the frames in the aggregate. 4337 */ 4338 bf = bf_head; 4339 while (bf != NULL) { 4340 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ 4341 4342 /* 4343 * If it's been added to the BAW we need to kick 4344 * it out of the BAW before we continue. 4345 * 4346 * XXX if it's an aggregate, assert that it's in the 4347 * BAW - we shouldn't have it be in an aggregate 4348 * otherwise! 4349 */ 4350 if (bf->bf_state.bfs_addedbaw) { 4351 ath_tx_update_baw(sc, an, atid, bf); 4352 bf->bf_state.bfs_dobaw = 0; 4353 } 4354 4355 /* 4356 * Give it the default completion handler. 4357 */ 4358 bf->bf_comp = ath_tx_normal_comp; 4359 bf->bf_next = NULL; 4360 4361 /* 4362 * Add it to the list to free. 4363 */ 4364 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 4365 4366 /* 4367 * Now advance to the next frame in the aggregate. 4368 */ 4369 bf = bf_next; 4370 } 4371 } 4372 4373 /* 4374 * Performs transmit side cleanup when TID changes from aggregated to 4375 * unaggregated and during reassociation. 4376 * 4377 * For now, this just tosses everything from the TID software queue 4378 * whether or not it has been retried and marks the TID as 4379 * pending completion if there's anything for this TID queued to 4380 * the hardware. 4381 * 4382 * The caller is responsible for pausing the TID and unpausing the 4383 * TID if no cleanup was required. Otherwise the cleanup path will 4384 * unpause the TID once the last hardware queued frame is completed. 4385 */ 4386 static void 4387 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, 4388 ath_bufhead *bf_cq) 4389 { 4390 struct ath_tid *atid = &an->an_tid[tid]; 4391 struct ath_buf *bf, *bf_next; 4392 4393 ATH_TX_LOCK_ASSERT(sc); 4394 4395 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4396 "%s: TID %d: called; inprogress=%d\n", __func__, tid, 4397 atid->cleanup_inprogress); 4398 4399 /* 4400 * Move the filtered frames to the TX queue, before 4401 * we run off and discard/process things.
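 *
 * Otherwise the cleanup pass below would only walk the software
 * TX queue and the filtered frames would be missed.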
4402 */ 4403 4404 /* XXX this is really quite inefficient */ 4405 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 4406 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 4407 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4408 } 4409 4410 /* 4411 * Update the frames in the software TX queue: 4412 * 4413 * + Discard retry frames in the queue 4414 * + Fix the completion function to be non-aggregate 4415 */ 4416 bf = ATH_TID_FIRST(atid); 4417 while (bf) { 4418 /* 4419 * Grab the next frame in the list, we may 4420 * be fiddling with the list. 4421 */ 4422 bf_next = TAILQ_NEXT(bf, bf_list); 4423 4424 /* 4425 * Free the frame and all subframes. 4426 */ 4427 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); 4428 4429 /* 4430 * Next frame! 4431 */ 4432 bf = bf_next; 4433 } 4434 4435 /* 4436 * If there's anything in the hardware queue we wait 4437 * for the TID HWQ to empty. 4438 */ 4439 if (atid->hwq_depth > 0) { 4440 /* 4441 * XXX how about we kill atid->incomp, and instead 4442 * replace it with a macro that checks that atid->hwq_depth 4443 * is 0? 4444 */ 4445 atid->incomp = atid->hwq_depth; 4446 atid->cleanup_inprogress = 1; 4447 } 4448 4449 if (atid->cleanup_inprogress) 4450 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4451 "%s: TID %d: cleanup needed: %d packets\n", 4452 __func__, tid, atid->incomp); 4453 4454 /* Owner now must free completed frames */ 4455 } 4456 4457 static struct ath_buf * 4458 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 4459 struct ath_tid *tid, struct ath_buf *bf) 4460 { 4461 struct ath_buf *nbf; 4462 int error; 4463 4464 /* 4465 * Clone the buffer. This will handle the dma unmap and 4466 * copy the node reference to the new buffer. If this 4467 * works out, 'bf' will have no DMA mapping, no mbuf 4468 * pointer and no node reference. 4469 */ 4470 nbf = ath_buf_clone(sc, bf); 4471 4472 #if 0 4473 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", 4474 __func__); 4475 #endif 4476 4477 if (nbf == NULL) { 4478 /* Failed to clone */ 4479 DPRINTF(sc, ATH_DEBUG_XMIT, 4480 "%s: failed to clone a busy buffer\n", 4481 __func__); 4482 return NULL; 4483 } 4484 4485 /* Setup the dma for the new buffer */ 4486 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 4487 if (error != 0) { 4488 DPRINTF(sc, ATH_DEBUG_XMIT, 4489 "%s: failed to setup dma for clone\n", 4490 __func__); 4491 /* 4492 * Put this at the head of the list, not tail; 4493 * that way it doesn't interfere with the 4494 * busy buffer logic (which uses the tail of 4495 * the list.) 4496 */ 4497 ATH_TXBUF_LOCK(sc); 4498 ath_returnbuf_head(sc, nbf); 4499 ATH_TXBUF_UNLOCK(sc); 4500 return NULL; 4501 } 4502 4503 /* Update BAW if required, before we free the original buf */ 4504 if (bf->bf_state.bfs_dobaw) 4505 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 4506 4507 /* Free original buffer; return new buffer */ 4508 ath_freebuf(sc, bf); 4509 4510 return nbf; 4511 } 4512 4513 /* 4514 * Handle retrying an unaggregate frame in an aggregate 4515 * session. 4516 * 4517 * If too many retries occur, pause the TID, wait for 4518 * any further retransmits (as there's no reason why 4519 * non-aggregate frames in an aggregate session are 4520 * transmitted in-order; they just have to be in-BAW) 4521 * and then queue a BAR. 
*/ 4523 static void 4524 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 4525 { 4526 struct ieee80211_node *ni = bf->bf_node; 4527 struct ath_node *an = ATH_NODE(ni); 4528 int tid = bf->bf_state.bfs_tid; 4529 struct ath_tid *atid = &an->an_tid[tid]; 4530 struct ieee80211_tx_ampdu *tap; 4531 4532 ATH_TX_LOCK(sc); 4533 4534 tap = ath_tx_get_tx_tid(an, tid); 4535 4536 /* 4537 * If the buffer is marked as busy, we can't directly 4538 * reuse it. Instead, try to clone the buffer. 4539 * If the clone is successful, recycle the old buffer. 4540 * If the clone is unsuccessful, set bfs_retries to max 4541 * to force the next bit of code to free the buffer 4542 * for us. 4543 */ 4544 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4545 (bf->bf_flags & ATH_BUF_BUSY)) { 4546 struct ath_buf *nbf; 4547 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4548 if (nbf) 4549 /* bf has been freed at this point */ 4550 bf = nbf; 4551 else 4552 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4553 } 4554 4555 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4556 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4557 "%s: exceeded retries; seqno %d\n", 4558 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4559 sc->sc_stats.ast_tx_swretrymax++; 4560 4561 /* Update BAW anyway */ 4562 if (bf->bf_state.bfs_dobaw) { 4563 ath_tx_update_baw(sc, an, atid, bf); 4564 if (! bf->bf_state.bfs_addedbaw) 4565 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4566 "%s: wasn't added: seqno %d\n", 4567 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4568 } 4569 bf->bf_state.bfs_dobaw = 0; 4570 4571 /* Suspend the TX queue and get ready to send the BAR */ 4572 ath_tx_tid_bar_suspend(sc, atid); 4573 4574 /* Send the BAR if there are no other frames waiting */ 4575 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4576 ath_tx_tid_bar_tx(sc, atid); 4577 4578 ATH_TX_UNLOCK(sc); 4579 4580 /* Free buffer, bf is free after this call */ 4581 ath_tx_default_comp(sc, bf, 0); 4582 return; 4583 } 4584 4585 /* 4586 * This increments the retry counter as well as 4587 * setting the retry flag in the ath_buf and packet 4588 * body. 4589 */ 4590 ath_tx_set_retry(sc, bf); 4591 sc->sc_stats.ast_tx_swretries++; 4592 4593 /* 4594 * Insert this at the head of the queue, so it's 4595 * retried before any current/subsequent frames. 4596 */ 4597 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 4598 ath_tx_tid_sched(sc, atid); 4599 /* Send the BAR if there are no other frames waiting */ 4600 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4601 ath_tx_tid_bar_tx(sc, atid); 4602 4603 ATH_TX_UNLOCK(sc); 4604 } 4605 4606 /* 4607 * Common code for aggregate excessive retry/subframe retry. 4608 * If retrying, queues buffers to bf_q. If not, frees the 4609 * buffers. 4610 * 4611 * XXX should unify this with ath_tx_aggr_retry_unaggr() 4612 */ 4613 static int 4614 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 4615 ath_bufhead *bf_q) 4616 { 4617 struct ieee80211_node *ni = bf->bf_node; 4618 struct ath_node *an = ATH_NODE(ni); 4619 int tid = bf->bf_state.bfs_tid; 4620 struct ath_tid *atid = &an->an_tid[tid]; 4621 4622 ATH_TX_LOCK_ASSERT(sc); 4623 4624 /* XXX clr11naggr should be done for all subframes */ 4625 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4626 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 4627 4628 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 4629 4630 /* 4631 * If the buffer is marked as busy, we can't directly 4632 * reuse it. Instead, try to clone the buffer. 4633 * If the clone is successful, recycle the old buffer.
4634 * If the clone is unsuccessful, set bfs_retries to max 4635 * to force the next bit of code to free the buffer 4636 * for us. 4637 */ 4638 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 4639 (bf->bf_flags & ATH_BUF_BUSY)) { 4640 struct ath_buf *nbf; 4641 nbf = ath_tx_retry_clone(sc, an, atid, bf); 4642 if (nbf) 4643 /* bf has been freed at this point */ 4644 bf = nbf; 4645 else 4646 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 4647 } 4648 4649 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 4650 sc->sc_stats.ast_tx_swretrymax++; 4651 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 4652 "%s: max retries: seqno %d\n", 4653 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4654 ath_tx_update_baw(sc, an, atid, bf); 4655 if (!bf->bf_state.bfs_addedbaw) 4656 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 4657 "%s: wasn't added: seqno %d\n", 4658 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4659 bf->bf_state.bfs_dobaw = 0; 4660 return 1; 4661 } 4662 4663 ath_tx_set_retry(sc, bf); 4664 sc->sc_stats.ast_tx_swretries++; 4665 bf->bf_next = NULL; /* Just to make sure */ 4666 4667 /* Clear the aggregate state */ 4668 bf->bf_state.bfs_aggr = 0; 4669 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ 4670 bf->bf_state.bfs_nframes = 1; 4671 4672 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 4673 return 0; 4674 } 4675 4676 /* 4677 * error pkt completion for an aggregate destination 4678 */ 4679 static void 4680 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 4681 struct ath_tid *tid) 4682 { 4683 struct ieee80211_node *ni = bf_first->bf_node; 4684 struct ath_node *an = ATH_NODE(ni); 4685 struct ath_buf *bf_next, *bf; 4686 ath_bufhead bf_q; 4687 int drops = 0; 4688 struct ieee80211_tx_ampdu *tap; 4689 ath_bufhead bf_cq; 4690 4691 TAILQ_INIT(&bf_q); 4692 TAILQ_INIT(&bf_cq); 4693 4694 /* 4695 * Update rate control - all frames have failed. 4696 */ 4697 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 4698 &bf_first->bf_status.ds_txstat, 4699 bf_first->bf_state.bfs_al, 4700 bf_first->bf_state.bfs_rc_maxpktlen, 4701 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 4702 4703 ATH_TX_LOCK(sc); 4704 tap = ath_tx_get_tx_tid(an, tid->tid); 4705 sc->sc_stats.ast_tx_aggr_failall++; 4706 4707 /* Retry all subframes */ 4708 bf = bf_first; 4709 while (bf) { 4710 bf_next = bf->bf_next; 4711 bf->bf_next = NULL; /* Remove it from the aggr list */ 4712 sc->sc_stats.ast_tx_aggr_fail++; 4713 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 4714 drops++; 4715 bf->bf_next = NULL; 4716 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 4717 } 4718 bf = bf_next; 4719 } 4720 4721 /* Prepend all frames to the beginning of the queue */ 4722 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 4723 TAILQ_REMOVE(&bf_q, bf, bf_list); 4724 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 4725 } 4726 4727 /* 4728 * Schedule the TID to be re-tried. 4729 */ 4730 ath_tx_tid_sched(sc, tid); 4731 4732 /* 4733 * send bar if we dropped any frames 4734 * 4735 * Keep the txq lock held for now, as we need to ensure 4736 * that ni_txseqs[] is consistent (as it's being updated 4737 * in the ifnet TX context or raw TX context.) 
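 *
 * (ath_tx_tid_bar_suspend() below only pauses the TID; the BAR
 * itself is only TXed later, once the TID's hardware queue has
 * drained.)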
*/ 4739 if (drops) { 4740 /* Suspend the TX queue and get ready to send the BAR */ 4741 ath_tx_tid_bar_suspend(sc, tid); 4742 } 4743 4744 /* 4745 * Send BAR if required 4746 */ 4747 if (ath_tx_tid_bar_tx_ready(sc, tid)) 4748 ath_tx_tid_bar_tx(sc, tid); 4749 4750 ATH_TX_UNLOCK(sc); 4751 4752 /* Complete frames which errored out */ 4753 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4754 TAILQ_REMOVE(&bf_cq, bf, bf_list); 4755 ath_tx_default_comp(sc, bf, 0); 4756 } 4757 } 4758 4759 /* 4760 * Handle clean-up of packets from an aggregate list. 4761 * 4762 * There's no need to update the BAW here - the session is being 4763 * torn down. 4764 */ 4765 static void 4766 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 4767 { 4768 struct ath_buf *bf, *bf_next; 4769 struct ieee80211_node *ni = bf_first->bf_node; 4770 struct ath_node *an = ATH_NODE(ni); 4771 int tid = bf_first->bf_state.bfs_tid; 4772 struct ath_tid *atid = &an->an_tid[tid]; 4773 4774 ATH_TX_LOCK(sc); 4775 4776 /* update incomp */ 4777 atid->incomp--; 4778 4779 /* Update the BAW */ 4780 bf = bf_first; 4781 while (bf) { 4782 /* XXX refactor! */ 4783 if (bf->bf_state.bfs_dobaw) { 4784 ath_tx_update_baw(sc, an, atid, bf); 4785 if (!bf->bf_state.bfs_addedbaw) 4786 DPRINTF(sc, ATH_DEBUG_SW_TX, 4787 "%s: wasn't added: seqno %d\n", 4788 __func__, SEQNO(bf->bf_state.bfs_seqno)); 4789 } 4790 bf = bf->bf_next; 4791 } 4792 4793 if (atid->incomp == 0) { 4794 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4795 "%s: TID %d: cleaned up! resume!\n", 4796 __func__, tid); 4797 atid->cleanup_inprogress = 0; 4798 ath_tx_tid_resume(sc, atid); 4799 } 4800 4801 /* Send BAR if required */ 4802 /* XXX why would we send a BAR when transitioning to non-aggregation? */ 4803 /* 4804 * XXX TODO: we should likely just tear down the BAR state here, 4805 * rather than sending a BAR. 4806 */ 4807 if (ath_tx_tid_bar_tx_ready(sc, atid)) 4808 ath_tx_tid_bar_tx(sc, atid); 4809 4810 ATH_TX_UNLOCK(sc); 4811 4812 /* Handle frame completion as individual frames */ 4813 bf = bf_first; 4814 while (bf) { 4815 bf_next = bf->bf_next; 4816 bf->bf_next = NULL; 4817 ath_tx_default_comp(sc, bf, 1); 4818 bf = bf_next; 4819 } 4820 } 4821 4822 /* 4823 * Handle completion of a set of aggregate frames. 4824 * 4825 * Note: the completion status is taken from the last descriptor in the 4826 * aggregate, not the last descriptor in the first frame. 4827 */ 4828 static void 4829 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 4830 int fail) 4831 { 4832 //struct ath_desc *ds = bf->bf_lastds; 4833 struct ieee80211_node *ni = bf_first->bf_node; 4834 struct ath_node *an = ATH_NODE(ni); 4835 int tid = bf_first->bf_state.bfs_tid; 4836 struct ath_tid *atid = &an->an_tid[tid]; 4837 struct ath_tx_status ts; 4838 struct ieee80211_tx_ampdu *tap; 4839 ath_bufhead bf_q; 4840 ath_bufhead bf_cq; 4841 int seq_st, tx_ok; 4842 int hasba, isaggr; 4843 uint32_t ba[2]; 4844 struct ath_buf *bf, *bf_next; 4845 int ba_index; 4846 int drops = 0; 4847 int nframes = 0, nbad = 0, nf; 4848 int pktlen; 4849 int agglen, rc_agglen; 4850 /* XXX there's too much on the stack? */ 4851 struct ath_rc_series rc[ATH_RC_NUM]; 4852 int txseq; 4853 4854 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 4855 __func__, atid->hwq_depth); 4856 4857 /* 4858 * Take a copy; this may be needed -after- bf_first 4859 * has been completed and freed.
*/ 4861 ts = bf_first->bf_status.ds_txstat; 4862 agglen = bf_first->bf_state.bfs_al; 4863 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen; 4864 4865 TAILQ_INIT(&bf_q); 4866 TAILQ_INIT(&bf_cq); 4867 4868 /* The TID state is kept behind the TXQ lock */ 4869 ATH_TX_LOCK(sc); 4870 4871 atid->hwq_depth--; 4872 if (atid->hwq_depth < 0) 4873 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", 4874 __func__, atid->hwq_depth); 4875 4876 /* 4877 * If the TID is filtered, handle completing the filter 4878 * transition before potentially kicking it to the cleanup 4879 * function. 4880 * 4881 * XXX this is duplicate work, ew. 4882 */ 4883 if (atid->isfiltered) 4884 ath_tx_tid_filt_comp_complete(sc, atid); 4885 4886 /* 4887 * Punt cleanup to the relevant function, not our problem now 4888 */ 4889 if (atid->cleanup_inprogress) { 4890 if (atid->isfiltered) 4891 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4892 "%s: isfiltered=1, normal_comp?\n", 4893 __func__); 4894 ATH_TX_UNLOCK(sc); 4895 ath_tx_comp_cleanup_aggr(sc, bf_first); 4896 return; 4897 } 4898 4899 /* 4900 * If the frame is filtered, transition to filtered frame 4901 * mode and add this to the filtered frame list. 4902 * 4903 * XXX TODO: figure out how this interoperates with 4904 * BAR, pause and cleanup states. 4905 */ 4906 if ((ts.ts_status & HAL_TXERR_FILT) || 4907 (ts.ts_status != 0 && atid->isfiltered)) { 4908 if (fail != 0) 4909 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4910 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4911 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4912 4913 /* Remove from BAW */ 4914 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4915 if (bf->bf_state.bfs_addedbaw) 4916 drops++; 4917 if (bf->bf_state.bfs_dobaw) { 4918 ath_tx_update_baw(sc, an, atid, bf); 4919 if (!bf->bf_state.bfs_addedbaw) 4920 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4921 "%s: wasn't added: seqno %d\n", 4922 __func__, 4923 SEQNO(bf->bf_state.bfs_seqno)); 4924 } 4925 bf->bf_state.bfs_dobaw = 0; 4926 } 4927 /* 4928 * If any intermediate frames in the BAW were dropped when 4929 * handling the filtered frames, send a BAR. 4930 */ 4931 if (drops) 4932 ath_tx_tid_bar_suspend(sc, atid); 4933 4934 /* 4935 * Finish up by sending a BAR if required and freeing 4936 * the frames outside of the TX lock. 4937 */ 4938 goto finish_send_bar; 4939 } 4940 4941 /* 4942 * XXX for now, use the first frame in the aggregate for 4943 * XXX rate control completion; it's at least consistent. 4944 */ 4945 pktlen = bf_first->bf_state.bfs_pktlen; 4946 4947 /* 4948 * Handle errors first! 4949 * 4950 * Here, handle _any_ error as an "exceeded retries" error. 4951 * Later on (when filtered frames are to be specially handled) 4952 * it'll have to be expanded. 4953 */ 4954 #if 0 4955 if (ts.ts_status & HAL_TXERR_XRETRY) { 4956 #endif 4957 if (ts.ts_status != 0) { 4958 ATH_TX_UNLOCK(sc); 4959 ath_tx_comp_aggr_error(sc, bf_first, atid); 4960 return; 4961 } 4962 4963 tap = ath_tx_get_tx_tid(an, tid); 4964 4965 /* 4966 * extract starting sequence and block-ack bitmap 4967 */ 4968 /* XXX endian-ness of seq_st, ba? */ 4969 seq_st = ts.ts_seqnum; 4970 hasba = !! (ts.ts_flags & HAL_TX_BA); 4971 tx_ok = (ts.ts_status == 0); 4972 isaggr = bf_first->bf_state.bfs_aggr; 4973 ba[0] = ts.ts_ba_low; 4974 ba[1] = ts.ts_ba_high; 4975 4976 /* 4977 * Copy the TX completion status and the rate control 4978 * series from the first descriptor, as it may be freed 4979 * before the rate control code can get its grubby fingers 4980 * into things.
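 *
 * (i.e. once the TX lock is dropped, bf_first may be requeued,
 * retransmitted and freed before ath_tx_update_ratectrl() gets
 * to run, so ts/rc[] have to be local copies.)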
4981 */ 4982 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4983 4984 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4985 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4986 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4987 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4988 isaggr, seq_st, hasba, ba[0], ba[1]); 4989 4990 /* 4991 * The reference driver doesn't do this; it simply ignores 4992 * this check in its entirety. 4993 * 4994 * I've seen this occur when using iperf to send traffic 4995 * out tid 1 - the aggregate frames are all marked as TID 1, 4996 * but the TXSTATUS has TID=0. So, let's just ignore this 4997 * check. 4998 */ 4999 #if 0 5000 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 5001 if (tid != ts.ts_tid) { 5002 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", 5003 __func__, tid, ts.ts_tid); 5004 tx_ok = 0; 5005 } 5006 #endif 5007 5008 /* AR5416 BA bug; this requires an interface reset */ 5009 if (isaggr && tx_ok && (! hasba)) { 5010 device_printf(sc->sc_dev, 5011 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 5012 "seq_st=%d\n", 5013 __func__, hasba, tx_ok, isaggr, seq_st); 5014 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 5015 /* And as we can't really trust the BA here .. */ 5016 ba[0] = 0; 5017 ba[1] = 0; 5018 seq_st = 0; 5019 #ifdef ATH_DEBUG 5020 ath_printtxbuf(sc, bf_first, 5021 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); 5022 #endif 5023 } 5024 5025 /* 5026 * Walk the list of frames, figure out which ones were correctly 5027 * sent and which weren't. 5028 */ 5029 bf = bf_first; 5030 nf = bf_first->bf_state.bfs_nframes; 5031 5032 /* bf_first is going to be invalid once this list is walked */ 5033 bf_first = NULL; 5034 5035 /* 5036 * Walk the list of completed frames and determine 5037 * which need to be completed and which need to be 5038 * retransmitted. 5039 * 5040 * For completed frames, the completion functions need 5041 * to be called at the end of this function as the last 5042 * node reference may free the node. 5043 * 5044 * Finally, since the TXQ lock can't be held during the 5045 * completion callback (to avoid lock recursion), 5046 * the completion calls have to be done outside of the 5047 * lock. 5048 */ 5049 while (bf) { 5050 nframes++; 5051 ba_index = ATH_BA_INDEX(seq_st, 5052 SEQNO(bf->bf_state.bfs_seqno)); 5053 bf_next = bf->bf_next; 5054 bf->bf_next = NULL; /* Remove it from the aggr list */ 5055 5056 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5057 "%s: checking bf=%p seqno=%d; ack=%d\n", 5058 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 5059 ATH_BA_ISSET(ba, ba_index)); 5060 5061 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 5062 sc->sc_stats.ast_tx_aggr_ok++; 5063 ath_tx_update_baw(sc, an, atid, bf); 5064 bf->bf_state.bfs_dobaw = 0; 5065 if (!bf->bf_state.bfs_addedbaw) 5066 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5067 "%s: wasn't added: seqno %d\n", 5068 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5069 bf->bf_next = NULL; 5070 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5071 } else { 5072 sc->sc_stats.ast_tx_aggr_fail++; 5073 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 5074 drops++; 5075 bf->bf_next = NULL; 5076 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 5077 } 5078 nbad++; 5079 } 5080 bf = bf_next; 5081 } 5082 5083 /* 5084 * Now that the BAW updates have been done, unlock 5085 * 5086 * txseq is grabbed before the lock is released so we 5087 * have a consistent view of what -was- in the BAW. 5088 * Anything after this point will not yet have been 5089 * TXed. 
*/ 5091 txseq = tap->txa_start; 5092 ATH_TX_UNLOCK(sc); 5093 5094 if (nframes != nf) 5095 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5096 "%s: num frames seen=%d; bf nframes=%d\n", 5097 __func__, nframes, nf); 5098 5099 /* 5100 * Now we know how many frames were bad, call the rate 5101 * control code. 5102 */ 5103 if (fail == 0) { 5104 ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen, 5105 nframes, nbad); 5106 } 5107 5108 /* 5109 * send bar if we dropped any frames 5110 */ 5111 if (drops) { 5112 /* Suspend the TX queue and get ready to send the BAR */ 5113 ATH_TX_LOCK(sc); 5114 ath_tx_tid_bar_suspend(sc, atid); 5115 ATH_TX_UNLOCK(sc); 5116 } 5117 5118 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5119 "%s: txa_start now %d\n", __func__, tap->txa_start); 5120 5121 ATH_TX_LOCK(sc); 5122 5123 /* Prepend all frames to the beginning of the queue */ 5124 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 5125 TAILQ_REMOVE(&bf_q, bf, bf_list); 5126 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 5127 } 5128 5129 /* 5130 * Reschedule to grab some further frames. 5131 */ 5132 ath_tx_tid_sched(sc, atid); 5133 5134 /* 5135 * If the queue is filtered, re-schedule as required. 5136 * 5137 * This is required as there may be a subsequent TX descriptor 5138 * for this end-node that has CLRDMASK set, so it's quite possible 5139 * that a filtered frame will be followed by a non-filtered 5140 * (complete or otherwise) frame. 5141 * 5142 * XXX should we do this before we complete the frame? 5143 */ 5144 if (atid->isfiltered) 5145 ath_tx_tid_filt_comp_complete(sc, atid); 5146 5147 finish_send_bar: 5148 5149 /* 5150 * Send BAR if required 5151 */ 5152 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5153 ath_tx_tid_bar_tx(sc, atid); 5154 5155 ATH_TX_UNLOCK(sc); 5156 5157 /* Do deferred completion */ 5158 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5159 TAILQ_REMOVE(&bf_cq, bf, bf_list); 5160 ath_tx_default_comp(sc, bf, 0); 5161 } 5162 } 5163 5164 /* 5165 * Handle completion of unaggregated frames in an ADDBA 5166 * session. 5167 * 5168 * Fail is set to 1 if the entry is being freed via a call to 5169 * ath_tx_draintxq(). 5170 */ 5171 static void 5172 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 5173 { 5174 struct ieee80211_node *ni = bf->bf_node; 5175 struct ath_node *an = ATH_NODE(ni); 5176 int tid = bf->bf_state.bfs_tid; 5177 struct ath_tid *atid = &an->an_tid[tid]; 5178 struct ath_tx_status ts; 5179 int drops = 0; 5180 5181 /* 5182 * Take a copy of this; filtering/cloning the frame may free the 5183 * bf pointer. 5184 */ 5185 ts = bf->bf_status.ds_txstat; 5186 5187 /* 5188 * Update rate control status here, before we possibly 5189 * punt to retry or cleanup. 5190 * 5191 * Do it outside of the TXQ lock. 5192 */ 5193 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 5194 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 5195 &bf->bf_status.ds_txstat, 5196 bf->bf_state.bfs_pktlen, 5197 bf->bf_state.bfs_pktlen, 5198 1, (ts.ts_status == 0) ? 0 : 1); 5199 5200 /* 5201 * This is called early so atid->hwq_depth can be tracked. 5202 * This unfortunately means that the lock is released and 5203 * regrabbed during retry and cleanup. That's rather inefficient.
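 *
 * (The retry and cleanup paths below drop ATH_TX_LOCK before
 * calling their handlers, which immediately re-take it.)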
*/ 5205 ATH_TX_LOCK(sc); 5206 5207 if (tid == IEEE80211_NONQOS_TID) 5208 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); 5209 5210 DPRINTF(sc, ATH_DEBUG_SW_TX, 5211 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 5212 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 5213 SEQNO(bf->bf_state.bfs_seqno)); 5214 5215 atid->hwq_depth--; 5216 if (atid->hwq_depth < 0) 5217 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", 5218 __func__, atid->hwq_depth); 5219 5220 /* 5221 * If the TID is filtered, handle completing the filter 5222 * transition before potentially kicking it to the cleanup 5223 * function. 5224 */ 5225 if (atid->isfiltered) 5226 ath_tx_tid_filt_comp_complete(sc, atid); 5227 5228 /* 5229 * If a cleanup is in progress, punt to comp_cleanup 5230 * rather than handling it here; it's then its 5231 * responsibility to clean up, call the completion 5232 * function in net80211, etc. 5233 */ 5234 if (atid->cleanup_inprogress) { 5235 if (atid->isfiltered) 5236 DPRINTF(sc, ATH_DEBUG_SW_TX, 5237 "%s: isfiltered=1, normal_comp?\n", 5238 __func__); 5239 ATH_TX_UNLOCK(sc); 5240 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 5241 __func__); 5242 ath_tx_comp_cleanup_unaggr(sc, bf); 5243 return; 5244 } 5245 5246 /* 5247 * XXX TODO: how does cleanup, BAR and filtered frame handling 5248 * overlap? 5249 * 5250 * If the frame is filtered OR if it's any failure but 5251 * the TID is filtered, the frame must be added to the 5252 * filtered frame list. 5253 * 5254 * However - a busy buffer can't be added to the filtered 5255 * list as it will end up being recycled without having 5256 * been made available for the hardware. 5257 */ 5258 if ((ts.ts_status & HAL_TXERR_FILT) || 5259 (ts.ts_status != 0 && atid->isfiltered)) { 5260 int freeframe; 5261 5262 if (fail != 0) 5263 DPRINTF(sc, ATH_DEBUG_SW_TX, 5264 "%s: isfiltered=1, fail=%d\n", 5265 __func__, fail); 5266 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); 5267 /* 5268 * If freeframe=0 then bf is no longer ours; don't 5269 * touch it. 5270 */ 5271 if (freeframe) { 5272 /* Remove from BAW */ 5273 if (bf->bf_state.bfs_addedbaw) 5274 drops++; 5275 if (bf->bf_state.bfs_dobaw) { 5276 ath_tx_update_baw(sc, an, atid, bf); 5277 if (!bf->bf_state.bfs_addedbaw) 5278 DPRINTF(sc, ATH_DEBUG_SW_TX, 5279 "%s: wasn't added: seqno %d\n", 5280 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5281 } 5282 bf->bf_state.bfs_dobaw = 0; 5283 } 5284 5285 /* 5286 * If the frame couldn't be filtered, treat it as a drop and 5287 * prepare to send a BAR. 5288 */ 5289 if (freeframe && drops) 5290 ath_tx_tid_bar_suspend(sc, atid); 5291 5292 /* 5293 * Send BAR if required 5294 */ 5295 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5296 ath_tx_tid_bar_tx(sc, atid); 5297 5298 ATH_TX_UNLOCK(sc); 5299 /* 5300 * If freeframe is set, then the frame couldn't be 5301 * cloned and bf is still valid. Just complete/free it. 5302 */ 5303 if (freeframe) 5304 ath_tx_default_comp(sc, bf, fail); 5305 5306 return; 5307 } 5308 /* 5309 * Don't bother with the retry check if all frames 5310 * are being failed (eg during queue deletion.) 5311 */ 5312 #if 0 5313 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 5314 #endif 5315 if (fail == 0 && ts.ts_status != 0) { 5316 ATH_TX_UNLOCK(sc); 5317 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 5318 __func__); 5319 ath_tx_aggr_retry_unaggr(sc, bf); 5320 return; 5321 } 5322 5323 /* Success?
Complete */ 5324 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 5325 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 5326 if (bf->bf_state.bfs_dobaw) { 5327 ath_tx_update_baw(sc, an, atid, bf); 5328 bf->bf_state.bfs_dobaw = 0; 5329 if (!bf->bf_state.bfs_addedbaw) 5330 DPRINTF(sc, ATH_DEBUG_SW_TX, 5331 "%s: wasn't added: seqno %d\n", 5332 __func__, SEQNO(bf->bf_state.bfs_seqno)); 5333 } 5334 5335 /* 5336 * If the queue is filtered, re-schedule as required. 5337 * 5338 * This is required as there may be a subsequent TX descriptor 5339 * for this end-node that has CLRDMASK set, so it's quite possible 5340 * that a filtered frame will be followed by a non-filtered 5341 * (complete or otherwise) frame. 5342 * 5343 * XXX should we do this before we complete the frame? 5344 */ 5345 if (atid->isfiltered) 5346 ath_tx_tid_filt_comp_complete(sc, atid); 5347 5348 /* 5349 * Send BAR if required 5350 */ 5351 if (ath_tx_tid_bar_tx_ready(sc, atid)) 5352 ath_tx_tid_bar_tx(sc, atid); 5353 5354 ATH_TX_UNLOCK(sc); 5355 5356 ath_tx_default_comp(sc, bf, fail); 5357 /* bf is freed at this point */ 5358 } 5359 5360 void 5361 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 5362 { 5363 if (bf->bf_state.bfs_aggr) 5364 ath_tx_aggr_comp_aggr(sc, bf, fail); 5365 else 5366 ath_tx_aggr_comp_unaggr(sc, bf, fail); 5367 } 5368 5369 /* 5370 * Grab the software queue depth that we COULD transmit. 5371 * 5372 * This includes checking whether it's in the BAW and whether it's a 5373 * frame that is supposed to be in the BAW at all. Other checks could be done; 5374 * but for now let's try to avoid doing the whole of ath_tx_form_aggr() 5375 * here. 5376 */ 5377 static int 5378 ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an, 5379 struct ath_tid *tid) 5380 { 5381 struct ath_buf *bf; 5382 struct ieee80211_tx_ampdu *tap; 5383 int nbytes = 0; 5384 5385 ATH_TX_LOCK_ASSERT(sc); 5386 5387 tap = ath_tx_get_tx_tid(an, tid->tid); 5388 5389 /* 5390 * Iterate over each buffer and sum the pkt_len. 5391 * Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't 5392 * ever queue more than that in a single frame. 5393 */ 5394 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) { 5395 5396 /* 5397 * TODO: I'm not sure if we're going to hit cases where 5398 * no frames get sent because the list is empty. 5399 */ 5400 5401 /* Check if it's in the BAW */ 5402 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 5403 SEQNO(bf->bf_state.bfs_seqno)))) { 5404 break; 5405 } 5406 5407 /* Check if it's even supposed to be in the BAW */ 5408 if (! bf->bf_state.bfs_dobaw) { 5409 break; 5410 } 5411 5412 nbytes += bf->bf_state.bfs_pktlen; 5413 if (nbytes >= ATH_AGGR_MAXSIZE) 5414 break; 5415 5416 /* 5417 * Check if we're likely going to leak a frame 5418 * as part of a PSPOLL. Break out at this point; 5419 * we're only going to send a single frame anyway. 5420 */ 5421 if (an->an_leak_count) { 5422 break; 5423 } 5424 } 5425 5426 return MIN(nbytes, ATH_AGGR_MAXSIZE); 5427 } 5428 5429 /* 5430 * Schedule some packets from the given node/TID to the hardware. 5431 * 5432 * This is the aggregate version.
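 *
 * Loop: take the head of the software queue, do a rate lookup
 * sized by the queued bytes, form an aggregate via
 * ath_tx_form_aggr(), then hand the result to the hardware
 * queue; stop once the TID is paused, the BAW closes or the
 * hardware queue is deep enough.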
*/ 5434 void 5435 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 5436 struct ath_tid *tid) 5437 { 5438 struct ath_buf *bf; 5439 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 5440 struct ieee80211_tx_ampdu *tap; 5441 ATH_AGGR_STATUS status; 5442 ath_bufhead bf_q; 5443 int swq_pktbytes; 5444 5445 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 5446 ATH_TX_LOCK_ASSERT(sc); 5447 5448 /* 5449 * XXX TODO: If we're called for a queue that we're leaking frames to, 5450 * ensure we only leak one. 5451 */ 5452 5453 tap = ath_tx_get_tx_tid(an, tid->tid); 5454 5455 if (tid->tid == IEEE80211_NONQOS_TID) 5456 DPRINTF(sc, ATH_DEBUG_SW_TX, 5457 "%s: called for TID=NONQOS_TID?\n", __func__); 5458 5459 for (;;) { 5460 status = ATH_AGGR_DONE; 5461 5462 /* 5463 * If the upper layer has paused the TID, don't 5464 * queue any further packets. 5465 * 5466 * This can also occur from the completion task because 5467 * of packet loss; but as it's serialised with this code, 5468 * it won't "appear" half way through queuing packets. 5469 */ 5470 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) 5471 break; 5472 5473 bf = ATH_TID_FIRST(tid); 5474 if (bf == NULL) { 5475 break; 5476 } 5477 5478 /* 5479 * If the packet doesn't fall within the BAW (eg a NULL 5480 * data frame), schedule it directly; continue. 5481 */ 5482 if (! bf->bf_state.bfs_dobaw) { 5483 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5484 "%s: non-baw packet\n", 5485 __func__); 5486 ATH_TID_REMOVE(tid, bf, bf_list); 5487 5488 if (bf->bf_state.bfs_nframes > 1) 5489 DPRINTF(sc, ATH_DEBUG_SW_TX, 5490 "%s: aggr=%d, nframes=%d\n", 5491 __func__, 5492 bf->bf_state.bfs_aggr, 5493 bf->bf_state.bfs_nframes); 5494 5495 /* 5496 * This shouldn't happen - such frames shouldn't 5497 * ever have been queued as an aggregate in the 5498 * first place. However, make sure the fields 5499 * are correctly setup just to be totally sure. 5500 */ 5501 bf->bf_state.bfs_aggr = 0; 5502 bf->bf_state.bfs_nframes = 1; 5503 5504 /* Update CLRDMASK just before this frame is queued */ 5505 ath_tx_update_clrdmask(sc, tid, bf); 5506 5507 ath_tx_do_ratelookup(sc, bf, tid->tid, 5508 bf->bf_state.bfs_pktlen, false); 5509 ath_tx_calc_duration(sc, bf); 5510 ath_tx_calc_protection(sc, bf); 5511 ath_tx_set_rtscts(sc, bf); 5512 ath_tx_rate_fill_rcflags(sc, bf); 5513 ath_tx_setds(sc, bf); 5514 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5515 5516 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 5517 5518 /* Queue the packet; continue */ 5519 goto queuepkt; 5520 } 5521 5522 TAILQ_INIT(&bf_q); 5523 5524 /* 5525 * Loop over the swq to find out how long 5526 * each packet is (up until 64k) and provide that 5527 * to the rate control lookup. 5528 */ 5529 swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid); 5530 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true); 5531 5532 /* 5533 * Note this is only used for the fragment paths and 5534 * should really be rethought if we want to do 5535 * things like an RTS burst across >1 aggregate.
*/ 5537 ath_tx_calc_duration(sc, bf); 5538 ath_tx_calc_protection(sc, bf); 5539 5540 ath_tx_set_rtscts(sc, bf); 5541 ath_tx_rate_fill_rcflags(sc, bf); 5542 5543 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 5544 5545 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5546 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 5547 5548 /* 5549 * No frames to be picked up - out of BAW 5550 */ 5551 if (TAILQ_EMPTY(&bf_q)) 5552 break; 5553 5554 /* 5555 * This assumes that the descriptors in the ath_bufhead 5556 * are already linked together via bf_next pointers. 5557 */ 5558 bf = TAILQ_FIRST(&bf_q); 5559 5560 if (status == ATH_AGGR_8K_LIMITED) 5561 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 5562 5563 /* 5564 * If it's the only frame, send it as a non-aggregate and 5565 * assume that ath_tx_form_aggr() has checked 5566 * whether it's in the BAW and added it appropriately. 5567 */ 5568 if (bf->bf_state.bfs_nframes == 1) { 5569 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5570 "%s: single-frame aggregate\n", __func__); 5571 5572 /* Update CLRDMASK just before this frame is queued */ 5573 ath_tx_update_clrdmask(sc, tid, bf); 5574 5575 bf->bf_state.bfs_aggr = 0; 5576 bf->bf_state.bfs_ndelim = 0; 5577 ath_tx_setds(sc, bf); 5578 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 5579 if (status == ATH_AGGR_BAW_CLOSED) 5580 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 5581 else 5582 sc->sc_aggr_stats.aggr_single_pkt++; 5583 } else { 5584 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 5585 "%s: multi-frame aggregate: %d frames, " 5586 "length %d\n", 5587 __func__, bf->bf_state.bfs_nframes, 5588 bf->bf_state.bfs_al); 5589 bf->bf_state.bfs_aggr = 1; 5590 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 5591 sc->sc_aggr_stats.aggr_aggr_pkt++; 5592 5593 /* Update CLRDMASK just before this frame is queued */ 5594 ath_tx_update_clrdmask(sc, tid, bf); 5595 5596 /* 5597 * Calculate the duration/protection as required. 5598 */ 5599 ath_tx_calc_duration(sc, bf); 5600 ath_tx_calc_protection(sc, bf); 5601 5602 /* 5603 * Update the rate and rtscts information based on the 5604 * rate decision made by the rate control code; 5605 * the first frame in the aggregate needs it. 5606 */ 5607 ath_tx_set_rtscts(sc, bf); 5608 5609 /* 5610 * Setup the relevant descriptor fields 5611 * for aggregation. The first descriptor 5612 * already points to the rest in the chain. 5613 */ 5614 ath_tx_setds_11n(sc, bf); 5615 5616 } 5617 queuepkt: 5618 /* Set completion handler, multi-frame aggregate or not */ 5619 bf->bf_comp = ath_tx_aggr_comp; 5620 5621 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 5622 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); 5623 5624 /* 5625 * Update leak count and frame config if we're leaking frames. 5626 * 5627 * XXX TODO: it should update all frames in an aggregate 5628 * correctly! 5629 */ 5630 ath_tx_leak_count_update(sc, tid, bf); 5631 5632 /* Punt to txq */ 5633 ath_tx_handoff(sc, txq, bf); 5634 5635 /* Track outstanding buffer count to hardware */ 5636 /* aggregates are "one" buffer */ 5637 tid->hwq_depth++; 5638 5639 /* 5640 * Break out if ath_tx_form_aggr() indicated 5641 * there can't be any further progress (eg BAW is full.) 5642 * Checking for an empty txq is done above. 5643 * 5644 * XXX locking on txq here? 5645 */ 5646 /* XXX TXQ locking */ 5647 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || 5648 (status == ATH_AGGR_BAW_CLOSED || 5649 status == ATH_AGGR_LEAK_CLOSED)) 5650 break; 5651 } 5652 } 5653 5654 /* 5655 * Schedule some packets from the given node/TID to the hardware.
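 *
 * This is the non-aggregate (normal) version; frames are
 * dispatched one at a time with the normal completion handler.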

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
 * It just dumps frames into the TXQ.  We should limit how deep
 * the transmit queue can grow for frames dispatched to the given
 * TXQ.
 *
 * To avoid locking issues, either we need to own the TXQ lock
 * at this point, or we need to pass in the maximum frame count
 * from the caller.
 */
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
	struct ath_buf *bf;
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
	    __func__, an, tid->tid);

	ATH_TX_LOCK_ASSERT(sc);

	/* If AMPDU is pending or running on this TID, note it */
	if (ath_tx_ampdu_pending(sc, an, tid->tid))
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
		    __func__, tid->tid);
	if (ath_tx_ampdu_running(sc, an, tid->tid))
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
		    __func__, tid->tid);

	for (;;) {

		/*
		 * If the upper layers have paused the TID, don't
		 * queue any further packets.
		 *
		 * XXX if we are leaking frames, make sure we decrement
		 * that counter _and_ we continue here.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
			break;

		bf = ATH_TID_FIRST(tid);
		if (bf == NULL) {
			break;
		}

		ATH_TID_REMOVE(tid, bf, bf_list);

		/* Sanity check! */
		if (tid->tid != bf->bf_state.bfs_tid) {
			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
			    tid->tid);
		}
		/* Normal completion handler */
		bf->bf_comp = ath_tx_normal_comp;

		/*
		 * Override this for now, until the non-aggregate
		 * completion handler correctly handles software retransmits.
		 */
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

		/* Update CLRDMASK just before this frame is queued */
		ath_tx_update_clrdmask(sc, tid, bf);

		/* Program descriptors + rate control */
		ath_tx_do_ratelookup(sc, bf, tid->tid,
		    bf->bf_state.bfs_pktlen, false);
		ath_tx_calc_duration(sc, bf);
		ath_tx_calc_protection(sc, bf);
		ath_tx_set_rtscts(sc, bf);
		ath_tx_rate_fill_rcflags(sc, bf);
		ath_tx_setds(sc, bf);

		/*
		 * Update the current leak count if
		 * we're leaking frames; and set the
		 * MORE flag as appropriate.
		 */
		ath_tx_leak_count_update(sc, tid, bf);

		/* Track outstanding buffer count to hardware */
		/* aggregates are "one" buffer */
		tid->hwq_depth++;

		/* Punt to hardware or software txq */
		ath_tx_handoff(sc, txq, bf);
	}
}

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid, *next, *last;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For non-EDMA chips, aggr frames that have been built are
	 * in axq_aggr_depth, whether they've been scheduled or not.
	 * There's no FIFO, so txq->axq_depth is what's been scheduled
	 * to the hardware.
	 *
	 * For EDMA chips, we do it in two stages.  The existing code
	 * builds a list of frames to go to the hardware and the EDMA
	 * code turns it into a single entry to push into the FIFO.
	 * That way we don't take up one packet per FIFO slot.
	 * We do push one aggregate per FIFO slot though, just to keep
	 * things simple.
	 *
	 * The FIFO depth is what's in the hardware; the txq->axq_depth
	 * is what's been scheduled to the FIFO.
	 *
	 * fifo.axq_depth is the number of frames (or aggregates) pushed
	 *  into the EDMA FIFO.  For multi-frame lists, this is the number
	 *  of frames pushed in.
	 * axq_fifo_depth is the number of FIFO slots currently busy.
	 */

	/* For EDMA and non-EDMA, check built/scheduled against aggr limit */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	/*
	 * For non-EDMA chips, axq_depth is the count of frames scheduled
	 * to the hardware list.  For EDMA it's the count of frames built
	 * for the hardware, and fifo.axq_depth is how many frames have
	 * already been dispatched to the hardware.
	 */
	if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
		/*
		 * Suspend paused queues here; they'll be resumed
		 * once the addba completes or times out.
		 */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
		    __func__, tid->tid, tid->paused);
		ath_tx_tid_unsched(sc, tid);
		/*
		 * This node may be in power-save and we're leaking
		 * a frame; be careful.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
			goto loop_done;
		}
		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
		else
			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

		/* Not empty? Re-schedule */
		if (tid->axq_depth != 0)
			ath_tx_tid_sched(sc, tid);

		/*
		 * Give the software queue time to aggregate more
		 * packets.  If we aren't running aggregation then
		 * we should still limit the hardware queue depth.
		 */
		/* XXX TXQ locking */
		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
			break;
		}
		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
			break;
		}
	loop_done:
		/*
		 * If this was the last entry on the original list, stop.
		 * Otherwise nodes that have been rescheduled onto the end
		 * of the TID FIFO list will just keep being rescheduled.
		 *
		 * XXX What should we do about nodes that were paused
		 * but are pending a leaking frame in response to a ps-poll?
		 * They'll be put at the front of the list; so they'll
		 * prematurely trigger this condition! Ew.
		 */
		if (tid == last)
			break;
	}
}
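
#if 0
/*
 * Self-contained illustration (not compiled) of the round-robin idiom
 * ath_txq_sched() uses above: capture the tail before walking, pop
 * entries from the head, optionally re-queue them at the tail, and
 * stop after the old tail has been visited so freshly re-queued
 * entries can't keep the loop spinning forever.
 */
struct rr_entry {
	TAILQ_ENTRY(rr_entry) link;
	int work;
};
TAILQ_HEAD(rr_head, rr_entry);

static void
rr_sched_once(struct rr_head *q)
{
	struct rr_entry *e, *next, *last;

	last = TAILQ_LAST(q, rr_head);
	TAILQ_FOREACH_SAFE(e, q, link, next) {
		TAILQ_REMOVE(q, e, link);
		if (--e->work > 0)
			TAILQ_INSERT_TAIL(q, e, link);	/* not done; go again */
		if (e == last)
			break;		/* one full pass completed */
	}
}
#endif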

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not running */

	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not pending */

	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}

/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	/*
	 * XXX danger Will Robinson!
	 *
	 * Although the taskqueue may be running and scheduling some more
	 * packets, these should all be _before_ the addba sequence number.
	 * However, net80211 will keep self-assigning sequence numbers
	 * until addba has been negotiated.
	 *
	 * In the past, these packets would be "paused" (which still works
	 * fine, as they're being scheduled to the driver in the same
	 * serialised method which is calling the addba request routine)
	 * and when the aggregation session begins, they'll be dequeued
	 * as aggregate packets and added to the BAW.  However, now there's
	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
	 * packets.  Thus they never get included in the BAW tracking and
	 * this can cause the initial burst of packets after the addba
	 * negotiation to "hang", as they quickly fall outside the BAW.
	 *
	 * The "eventual" solution should be to tag these packets with
	 * dobaw.  Although net80211 has given us a sequence number,
	 * it'll be "after" the left edge of the BAW and thus it'll
	 * fall within it.
	 */
	ATH_TX_LOCK(sc);
	/*
	 * This is a bit annoying.  Until net80211 HT code inherits some
	 * (any) locking, we may have this called in parallel BUT only
	 * one response/timeout will be called.  Grr.
	 */
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    dialogtoken, baparamset, batimeout);
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}
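
#if 0
/*
 * Illustrative helper (not compiled): what "falls within the BAW"
 * means in the comments above.  802.11 sequence numbers live in a
 * 12-bit space, so the distance from the window's left edge is taken
 * modulo 4096; a frame is inside the window when that distance is
 * less than the negotiated window size (eg 64).
 */
static int
ath_tx_baw_within_sketch(int left_edge, int wnd, int seqno)
{
	return (((seqno - left_edge) & 4095) < wnd);
}
#endif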

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should go via the aggregate path
 * (whether actually aggregated or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated.  This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW.  For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int r;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
	    ni->ni_macaddr,
	    ":",
	    status, code, batimeout);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/*
	 * Call this first, so the interface flags get updated
	 * before the TID is unpaused.  Otherwise a race condition
	 * exists where the unpaused TID still doesn't yet have
	 * IEEE80211_AGGR_RUNNING set.
	 */
	r = sc->sc_addba_response(ni, tap, status, code, batimeout);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	/*
	 * XXX dirty!
	 * Slide the BAW left edge to wherever net80211 left it for us.
	 * Read above for more information.
	 */
	tap->txa_start = ni->ni_txseqs[tid];
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
	return r;
}
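
/*
 * Worked example of the slide in ath_addba_response(), with made-up
 * numbers: if the ADDBA exchange negotiated txa_start = 100, but
 * net80211 kept self-assigning sequence numbers in the meantime so
 * ni_txseqs[tid] is now 108, the left edge is moved up to 108.  The
 * eight frames in between went out as normal (non-BAW) frames, so
 * nothing in the window refers to them.
 */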

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");

	/*
	 * Pause TID traffic early, so there aren't any races.
	 * Then unblock the pending BAR-held traffic, if it's currently
	 * paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path.  This quietens
		 * the warning.  It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);

	/*
	 * In case there's a followup call to this, only call it
	 * if we don't have a cleanup in progress.
	 *
	 * Since we've paused the queue above, we need to make
	 * sure we unpause if there's already a cleanup in
	 * progress - it means something else is also doing
	 * this stuff, so we don't need to also keep it paused.
	 */
	if (atid->cleanup_inprogress) {
		ath_tx_tid_resume(sc, atid);
	} else {
		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
		/*
		 * Unpause the TID if no cleanup is required.
		 */
		if (! atid->cleanup_inprogress)
			ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *tid;
	int i;
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);
	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
		tid = &an->an_tid[i];
		if (tid->hwq_depth == 0)
			continue;
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: TID %d: cleaning up TID\n",
		    __func__,
		    an->an_node.ni_macaddr,
		    ":",
		    i);
		/*
		 * In case there's a followup call to this, only call it
		 * if we don't have a cleanup in progress.
		 */
		if (! tid->cleanup_inprogress) {
			ath_tx_tid_pause(sc, tid);
			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
			/*
			 * Unpause the TID if no cleanup is required.
			 */
			if (! tid->cleanup_inprogress)
				ath_tx_tid_resume(sc, tid);
		}
	}
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}
}
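
/*
 * Note the pattern shared by ath_addba_stop() and ath_tx_node_reassoc()
 * above: buffers to be failed are collected onto a private list (bf_cq)
 * while the TX lock is held, and their completion handlers only run
 * once the lock has been dropped, keeping completion work (which may
 * be heavyweight) outside the lock.
 */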

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session.  Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;
	int old_txa_start;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts,
	    tap->txa_start,
	    tap->txa_seqpending);

	/* Note: This may update the BAW details */
	/*
	 * XXX What if this does slide the BAW along?  We need to somehow
	 * XXX either fix things when it does happen, or prevent the
	 * XXX seqpending value from being anything other than exactly what
	 * XXX the hell we want!
	 *
	 * XXX So for now, do this inside the TX lock and just correct
	 * XXX it afterwards.  The below condition should never happen,
	 * XXX and if it does, all kinds of things need fixing.
	 */
	ATH_TX_LOCK(sc);
	old_txa_start = tap->txa_start;
	sc->sc_bar_response(ni, tap, status);
	if (tap->txa_start != old_txa_start) {
		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
		    __func__,
		    tid,
		    tap->txa_start,
		    old_txa_start);
	}
	tap->txa_start = old_txa_start;
	ATH_TX_UNLOCK(sc);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session.  So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch? (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID, which reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}
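
#if 0
/*
 * Rough sketch (not compiled, and a simplification) of how the
 * pause/resume calls used throughout this file nest: tid->paused
 * behaves like a counter, so paired pause/resume calls from different
 * paths (addba, BAR, power save) don't stomp on each other.
 */
static void
ath_tx_tid_pause_sketch(struct ath_tid *tid)
{
	tid->paused++;
}

static void
ath_tx_tid_resume_sketch(struct ath_softc *sc, struct ath_tid *tid)
{
	if (--tid->paused == 0)
		ath_tx_tid_sched(sc, tid);	/* eligible to TX again */
}
#endif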

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works.  Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held.  This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations.  There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* !? */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
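
#if 0
/*
 * Sketch (not compiled) of how the method table filled in above is
 * presumably consumed elsewhere: thin wrappers dispatch through
 * sc->sc_tx, so the EDMA attach path can install its own
 * implementations without the callers changing.  The wrapper shape
 * here is an assumption for illustration.
 */
static void
ath_tx_handoff_sketch(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	sc->sc_tx.xmit_handoff(sc, txq, bf);
}
#endif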