/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */
#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID).
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}
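
/*
 * NB: illustrative summary, assuming the standard net80211 WME
 * mapping (not something this driver defines itself):
 *
 *	M_WME_GETAC(m0)		WME_AC_TO_TID()
 *	WME_AC_BE		TID 0
 *	WME_AC_BK		TID 1
 *	WME_AC_VI		TID 5
 *	WME_AC_VO		TID 6
 *
 * A QoS data frame classified WME_AC_VI is thus assigned TID 5 by
 * ath_tx_gettid() above, whilst any non-QoS frame lands in TID 16
 * (IEEE80211_NONQOS_TID) regardless of its classified AC.
 */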
static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and put
 * non-QoS TID frames onto the ATH_NONQOS_TID_AC queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame. Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			device_printf(sc->sc_dev, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources. For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}
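
/*
 * DMA map an outgoing frame's mbuf chain.
 *
 * The flow below is: attempt a scatter/gather load of the whole
 * chain; if it needs more than ATH_TXDESC segments (EFBIG),
 * compact it with m_collapse() and retry the load once; a
 * zero-segment result is a null packet and is discarded.
 */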
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors. We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a non-11n frame.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds, *ds0;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;
	int qnum;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * For now the HAL doesn't implement halNumTxMaps for non-EDMA
	 * (ie it's 0.) So just work around it.
	 *
	 * XXX TODO: populate halNumTxMaps for each HAL chip and
	 * then undo this hack.
	 */
	if (sc->sc_ah->ah_magic == 0x19741014)
		numTxMaps = 4;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds0 = ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX this assumes that bfs_txq is the actual destination
		 * hardware queue at this point. It may not have been
		 * assigned; it may actually be pointing to the multicast
		 * software TXQ id. These must be fixed!
		 */
		qnum = bf->bf_state.bfs_txq->axq_qnum;

		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
		    , bufAddrList
		    , segLenList
		    , bf->bf_descid		/* XXX desc id */
		    , qnum
		    , isFirstDesc		/* first segment */
		    , i == bf->bf_nseg - 1	/* last segment */
		    , (struct ath_desc *) ds0	/* first descriptor */
		);

		/* Make sure the 11n aggregate fields are cleared */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		isFirstDesc = 0;
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT)
			ath_printtxbuf(sc, bf, qnum, 0, 0);
#endif
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Fill in the descriptor list for an aggregate subframe.
 *
 * The subframe is returned with the ds_link field in the last subframe
 * pointing to 0.
 */
static void
ath_tx_chaindesclist_subframe(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	ds0 = ds = bf->bf_desc;

	/*
	 * There's no need to call ath_hal_setupfirsttxdesc here;
	 * that's only going to occur for the first frame in an aggregate.
	 */
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, ds, 0);
		else
			ath_hal_settxdesclink(ah, ds,
			    bf->bf_daddr + dd->dd_descsize * (i + 1));

		bufAddrList[0] = bf->bf_segs[i].ds_addr;
		segLenList[0] = bf->bf_segs[i].ds_len;

		/*
		 * This performs the setup for an aggregate frame.
		 * This includes enabling the aggregate flags if needed.
		 */
		ath_hal_chaintxdesc(ah, ds,
		    bufAddrList,
		    segLenList,
		    bf->bf_state.bfs_pktlen,
		    bf->bf_state.bfs_hdrlen,
		    HAL_PKT_TYPE_AMPDU,	/* forces aggregate bits to be set */
		    bf->bf_state.bfs_keyix,
		    0,			/* cipher, calculated from keyix */
		    bf->bf_state.bfs_ndelim,
		    i == 0,		/* first segment */
		    i == bf->bf_nseg - 1,	/* last segment */
		    bf->bf_next == NULL	/* last sub-frame in aggr */
		);

		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %d: %08x %08x %08x %08x %08x %08x\n",
		    __func__, i, ds->ds_link, ds->ds_data,
		    ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
		bf->bf_lastds = ds;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
}
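
/*
 * Illustrative descriptor layout (a sketch, not lifted from chip
 * documentation): a legacy single-buffer-pointer frame with three
 * mbuf segments becomes three chained descriptors:
 *
 *	ds[0]: buf0=seg0, link -> ds[1]
 *	ds[1]: buf0=seg1, link -> ds[2]
 *	ds[2]: buf0=seg2, link = 0	(bf->bf_lastds)
 *
 * whereas on a 4-buffer EDMA chip (numTxMaps == 4) the same frame
 * fits in one descriptor with three of its four buffer pointers
 * populated and the fourth left zeroed.
 */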
/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
		    , rc[1].ratecode, rc[1].tries
		    , rc[2].ratecode, rc[2].tries
		    , rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	/*
	 * Setup all descriptors of all subframes.
	 */
	bf = bf_first;
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/* Sub-frame setup */
		ath_tx_chaindesclist_subframe(sc, bf);

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Setup first descriptor of first frame.
	 * chaintxdesc() overwrites the descriptor entries;
	 * setupfirsttxdesc() merges in things.
	 * Otherwise various fields aren't set correctly (eg flags).
	 */
	ath_hal_setupfirsttxdesc(sc->sc_ah,
	    bf_first->bf_desc,
	    bf_first->bf_state.bfs_al,
	    bf_first->bf_state.bfs_txflags | HAL_TXDESC_INTREQ,
	    bf_first->bf_state.bfs_txpower,
	    bf_first->bf_state.bfs_txrate0,
	    bf_first->bf_state.bfs_try0,
	    bf_first->bf_state.bfs_txantenna,
	    bf_first->bf_state.bfs_ctsrate,
	    bf_first->bf_state.bfs_ctsduration);

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe; that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * Setup first desc with rate and aggr info.
	 */
	ath_tx_set_ratectrl(sc, bf_first->bf_node, bf_first);

	/*
	 * Setup the last descriptor in the list.
	 *
	 * bf_first->bf_lastds already points to it; the rate
	 * control information needs to be squirreled away here
	 * as well as clearing the moreaggr/paddelim fields.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_first->bf_lastds,
	    bf_first->bf_desc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_txq must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_txq; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TXQ_LOCK_ASSERT(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	if (txq->axq_link != NULL) {
		struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		*txq->axq_link = bf->bf_daddr;
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ath_hal_gettxdesclinkptr(sc->sc_ah, bf->bf_lastds, &txq->axq_link);
}
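
/*
 * For example (illustrative), after queuing buffers bf0..bf2 to
 * the mcast queue via the function above:
 *
 *	bf0: IEEE80211_FC1_MORE_DATA set, lastds link -> bf1
 *	bf1: IEEE80211_FC1_MORE_DATA set, lastds link -> bf2
 *	bf2: MORE_DATA clear, lastds link = 0; axq_link points here
 *
 * so when the list is later appended to the CAB queue the hardware
 * walks the whole chain, and power save stations see MORE_DATA on
 * every frame but the last.
 */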
/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware. Multicast frames buffered for power
	 * save stations (and transmitted from the CAB queue) are
	 * stored on a s/w only queue and loaded onto the CAB queue
	 * in the SWBA handler, since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK_ASSERT(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		/*
		 * There's no need to update axq_link; the hardware
		 * is in reset and once the reset is complete, any
		 * non-empty queues will simply have DMA restarted.
		 */
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	/* For now, so not to generate whitespace diffs */
	if (1) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);

		ATH_KTR(sc, ATH_KTR_TX, 4,
		    "ath_tx_handoff: txq=%u, add bf=%p, qbusy=%d, depth=%d",
		    txq->axq_qnum, bf, qbusy, txq->axq_depth);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP. If
			 * the tx q is enabled then this write will be
			 * ignored. Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur. If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum,
				    bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) lastds=%p depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
				ATH_KTR(sc, ATH_KTR_TX, 5,
				    "ath_tx_handoff: TXDP[%u] = %p (%p) "
				    "lastds=%p depth %d",
				    txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    bf->bf_lastds,
				    txq->axq_depth);
			} else {
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 0, "defer enable");
			}
		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: link[%u](%p)=%p (%p) lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);

			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain. Since it's not busy now
				 * handle this chore. We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
				    TAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
				ATH_KTR(sc, ATH_KTR_TX, 4,
				    "ath_tx_handoff: txq[%d] restarted, bf=%p "
				    "daddr=%p ds=%p",
				    txq->axq_qnum,
				    bf,
				    (caddr_t)bf->bf_daddr,
				    bf->bf_desc);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		ATH_KTR(sc, ATH_KTR_TX, 3,
		    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
		    "depth=%d",
		    txq->axq_qnum,
		    bf,
		    txq->axq_depth);
		if (txq->axq_link == NULL) {
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: TXDP[%u] = %p (%p) "
			    "lastds=%p depth %d",
			    txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds,
			    txq->axq_depth);

		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
			ATH_KTR(sc, ATH_KTR_TX, 5,
			    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
			    "lastds=%p",
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    bf->bf_lastds);

		}
#endif /* IEEE80211_SUPPORT_TDMA */
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
		ath_hal_txstart(ah, txq->axq_qnum);
		ATH_KTR(sc, ATH_KTR_TX, 1,
		    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
	}
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* This is always going to be cleared, empty or not */
	txq->axq_flags &= ~ATH_TXQ_PUTPENDING;

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
	ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &txq->axq_link);
	ath_hal_txstart(ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TXQ_LOCK_ASSERT(txq);

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}
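
/*
 * NB: as a worked example of the length fixups done below, using
 * the cipher parameters net80211's cipher modules are assumed to
 * supply: a TKIP frame grows by ic_header (8) at the head and
 * ic_trailer (4, the ICV) at the tail, plus ic_miclen (8, the
 * Michael MIC) when the MIC is done in hardware; CCMP grows by
 * 8 + 8 with an ic_miclen of 0.
 */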
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued. Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index. When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS. But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}
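
/*
 * Summary of the legacy protection decision above (OFDM unicast
 * frames with IEEE80211_F_USEPROT set):
 *
 *	ic_protmode			txflags added
 *	IEEE80211_PROT_RTSCTS		HAL_TXDESC_RTSENA
 *	IEEE80211_PROT_CTSONLY		HAL_TXDESC_CTSENA
 *	(anything else)			none (bfs_doprot still set)
 *
 * HT frames instead get HAL_TXDESC_RTSENA when ic_htprotmode is
 * IEEE80211_PROT_RTSCTS.
 */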
/*
 * Update the frame duration given the currently selected rate.
 *
 * This updates the i_dur field in the 802.11 header in host memory,
 * so the frame will require a DMA flush before it is handed to the
 * hardware.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment"));
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly. The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah, rt,
			    bf->bf_m->m_nextpkt->m_pkthdr.len,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}
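
/*
 * Worked example for the calculation below: a short preamble,
 * RTS protected, ACKed frame accumulates
 *
 *	ctsduration = rt->info[cix].spAckDuration	SIFS + CTS
 *		    + txtime(pktlen at rix)		SIFS + data
 *		    + rt->info[rix].spAckDuration	SIFS + ACK
 *
 * CTS-to-self (HAL_TXDESC_CTSENA only) omits the first term and
 * HAL_TXDESC_NOACK omits the last; the long preamble path is the
 * same with the lpAckDuration values substituted.
 */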
/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame. We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal frame or fast-frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct. Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_setuptxdesc(ah, ds
	    , bf->bf_state.bfs_pktlen		/* packet length */
	    , bf->bf_state.bfs_hdrlen		/* header length */
	    , bf->bf_state.bfs_atype		/* Atheros packet type */
	    , bf->bf_state.bfs_txpower		/* txpower */
	    , bf->bf_state.bfs_txrate0
	    , bf->bf_state.bfs_try0		/* series 0 rate/tries */
	    , bf->bf_state.bfs_keyix		/* key cache index */
	    , bf->bf_state.bfs_txantenna	/* antenna mode */
	    , bf->bf_state.bfs_txflags		/* flags */
	    , bf->bf_state.bfs_ctsrate		/* rts/cts rate */
	    , bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, bf);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{

	ATH_TID_LOCK_ASSERT(sc, tid);

	if (tid->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		tid->clrdmask = 0;
	}
}
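
/*
 * The transmit paths below invoke the setup helpers in a fixed
 * order: ath_tx_do_ratelookup(), then ath_tx_calc_duration(),
 * ath_tx_calc_protection(), ath_tx_set_rtscts(),
 * ath_tx_rate_fill_rcflags() and finally ath_tx_setds().
 * The order matters: protection and RTS/CTS consume the rix
 * chosen by the rate lookup, so redoing the lookup (eg for a
 * software retry) means redoing the later steps as well.
 */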
/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * is "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PNs, resulting
	 * in many, many frame drops.
	 */
	ATH_TXQ_LOCK_ASSERT(txq);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16. TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future. There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue. That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		device_printf(sc->sc_dev,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames. Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of a frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ni->ni_txpower;
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}
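
/*
 * NB: ath_tx_normal_setup() may replace bf->bf_m (via m_collapse()
 * in ath_tx_dmasetup()), so any cached mbuf pointer must be
 * reloaded afterwards, eg:
 *
 *	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
 *	if (r == 0)
 *		m0 = bf->bf_m;	(m0 may have been reallocated)
 */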
/*
 * Queue a frame to the hardware or software queue.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so it's serialised?
 *
 * XXX When sending management frames via ath_raw_xmit(),
 * should CLRDMASK be set unconditionally?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	ieee80211_seq seqno;
	uint8_t type, subtype;

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be. If it's a non-QoS frame, the
	 * AC and TID are overridden. The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);

	txq = sc->sc_ac2q[pri];
	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/*
	 * Enforce how deep the multicast queue can grow.
	 *
	 * XXX duplicated in ath_raw_xmit().
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ATH_TXQ_LOCK(sc->sc_cabq);

		if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) {
			sc->sc_stats.ast_tx_mcastq_overflow++;
			r = ENOBUFS;
		}

		ATH_TXQ_UNLOCK(sc->sc_cabq);

		if (r != 0) {
			m_freem(m0);
			return r;
		}
	}

	/* A-MPDU TX */
	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
	is_ampdu = is_ampdu_tx | is_ampdu_pending;

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
	    __func__, tid, pri, is_ampdu);

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_txq = txq;
	bf->bf_state.bfs_pri = pri;

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast frames
	 * must be buffered until after the beacon.
	 *
	 * TODO: we should lock the mcastq before we check the length.
	 */
	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
		txq = &avp->av_mcastq;
		/*
		 * Mark the frame as eventually belonging on the CAB
		 * queue, so the descriptor setup functions will
		 * correctly initialise the descriptor 'qcuId' field.
		 */
		bf->bf_state.bfs_txq = sc->sc_cabq;
	}

	/* Do the generic frame setup */
	/* XXX should just bzero the bf_state? */
	bf->bf_state.bfs_dobaw = 0;

	/*
	 * Acquire the TXQ lock early, so both the encap and seqno
	 * are allocated together.
	 *
	 * XXX should TXQ for CABQ traffic be the multicast queue,
	 * or the TXQ the given PRI would allocate from? (eg for
	 * sequence number allocation locking.)
	 */
	ATH_TXQ_LOCK(txq);

	/* A-MPDU TX? Manually set sequence number */
	/*
	 * Don't do it whilst pending; the net80211 layer still
	 * assigns them.
	 */
	if (is_ampdu_tx) {
		/*
		 * Always call; this function will
		 * handle making sure that null data frames
		 * don't get a sequence number from the current
		 * TID and thus mess with the BAW.
		 */
		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);

		/*
		 * Don't add QoS NULL frames to the BAW.
		 */
		if (IEEE80211_QOS_HAS_SEQ(wh) &&
		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
			bf->bf_state.bfs_dobaw = 1;
		}
	}

	/*
	 * If needed, the sequence number has been assigned.
	 * Squirrel it away somewhere easy to get to.
	 */
	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;

	/* Is ampdu pending? Fetch the seqno and print it out */
fetch the seqno and print it out */ 1843 if (is_ampdu_pending) 1844 DPRINTF(sc, ATH_DEBUG_SW_TX, 1845 "%s: tid %d: ampdu pending, seqno %d\n", 1846 __func__, tid, M_SEQNO_GET(m0)); 1847 1848 /* This also sets up the DMA map */ 1849 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 1850 1851 if (r != 0) 1852 goto done; 1853 1854 /* At this point m0 could have changed! */ 1855 m0 = bf->bf_m; 1856 1857 #if 1 1858 /* 1859 * If it's a multicast frame, do a direct-dispatch to the 1860 * destination hardware queue. Don't bother software 1861 * queuing it. 1862 */ 1863 /* 1864 * If it's a BAR frame, do a direct dispatch to the 1865 * destination hardware queue. Don't bother software 1866 * queuing it, as the TID will now be paused. 1867 * Sending a BAR frame can occur from the net80211 txa timer 1868 * (ie, retries) or from the ath txtask (completion call.) 1869 * It queues directly to hardware because the TID is paused 1870 * at this point (and won't be unpaused until the BAR has 1871 * either been TXed successfully or max retries has been 1872 * reached.) 1873 */ 1874 if (txq == &avp->av_mcastq) { 1875 DPRINTF(sc, ATH_DEBUG_SW_TX, 1876 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 1877 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1878 ath_tx_xmit_normal(sc, txq, bf); 1879 } else if (type == IEEE80211_FC0_TYPE_CTL && 1880 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1881 DPRINTF(sc, ATH_DEBUG_SW_TX, 1882 "%s: BAR: TX'ing direct\n", __func__); 1883 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1884 ath_tx_xmit_normal(sc, txq, bf); 1885 } else { 1886 /* add to software queue */ 1887 DPRINTF(sc, ATH_DEBUG_SW_TX, 1888 "%s: bf=%p: swq: TX'ing\n", __func__, bf); 1889 ath_tx_swq(sc, ni, txq, bf); 1890 } 1891 #else 1892 /* 1893 * For now, since there's no software queue, 1894 * direct-dispatch to the hardware. 1895 */ 1896 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1897 ath_tx_xmit_normal(sc, txq, bf); 1898 #endif 1899 done: 1900 ATH_TXQ_UNLOCK(txq); 1901 1902 return 0; 1903 } 1904 1905 static int 1906 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 1907 struct ath_buf *bf, struct mbuf *m0, 1908 const struct ieee80211_bpf_params *params) 1909 { 1910 struct ifnet *ifp = sc->sc_ifp; 1911 struct ieee80211com *ic = ifp->if_l2com; 1912 struct ath_hal *ah = sc->sc_ah; 1913 struct ieee80211vap *vap = ni->ni_vap; 1914 int error, ismcast, ismrr; 1915 int keyix, hdrlen, pktlen, try0, txantenna; 1916 u_int8_t rix, txrate; 1917 struct ieee80211_frame *wh; 1918 u_int flags; 1919 HAL_PKT_TYPE atype; 1920 const HAL_RATE_TABLE *rt; 1921 struct ath_desc *ds; 1922 u_int pri; 1923 int o_tid = -1; 1924 int do_override; 1925 1926 wh = mtod(m0, struct ieee80211_frame *); 1927 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1928 hdrlen = ieee80211_anyhdrsize(wh); 1929 /* 1930 * Packet length must not include any 1931 * pad bytes; deduct them here. 1932 */ 1933 /* XXX honor IEEE80211_BPF_DATAPAD */ 1934 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 1935 1936 ATH_KTR(sc, ATH_KTR_TX, 2, 1937 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 1938 1939 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 1940 __func__, ismcast); 1941 1942 pri = params->ibp_pri & 3; 1943 /* Override pri if the frame isn't a QoS one */ 1944 if (! 
IEEE80211_QOS_HAS_SEQ(wh)) 1945 pri = ath_tx_getac(sc, m0); 1946 1947 /* XXX If it's an ADDBA, override the correct queue */ 1948 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 1949 1950 /* Map ADDBA to the correct priority */ 1951 if (do_override) { 1952 #if 0 1953 device_printf(sc->sc_dev, 1954 "%s: overriding tid %d pri %d -> %d\n", 1955 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 1956 #endif 1957 pri = TID_TO_WME_AC(o_tid); 1958 } 1959 1960 ATH_TXQ_LOCK(sc->sc_ac2q[pri]); 1961 1962 /* Handle encryption twiddling if needed */ 1963 if (! ath_tx_tag_crypto(sc, ni, 1964 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 1965 &hdrlen, &pktlen, &keyix)) { 1966 ath_freetx(m0); 1967 return EIO; 1968 } 1969 /* packet header may have moved, reset our local pointer */ 1970 wh = mtod(m0, struct ieee80211_frame *); 1971 1972 /* Do the generic frame setup */ 1973 /* XXX should just bzero the bf_state? */ 1974 bf->bf_state.bfs_dobaw = 0; 1975 1976 error = ath_tx_dmasetup(sc, bf, m0); 1977 if (error != 0) 1978 return error; 1979 m0 = bf->bf_m; /* NB: may have changed */ 1980 wh = mtod(m0, struct ieee80211_frame *); 1981 bf->bf_node = ni; /* NB: held reference */ 1982 1983 /* Always enable CLRDMASK for raw frames for now.. */ 1984 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1985 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1986 if (params->ibp_flags & IEEE80211_BPF_RTS) 1987 flags |= HAL_TXDESC_RTSENA; 1988 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 1989 /* XXX assume 11g/11n protection? */ 1990 bf->bf_state.bfs_doprot = 1; 1991 flags |= HAL_TXDESC_CTSENA; 1992 } 1993 /* XXX leave ismcast to injector? */ 1994 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 1995 flags |= HAL_TXDESC_NOACK; 1996 1997 rt = sc->sc_currates; 1998 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1999 rix = ath_tx_findrix(sc, params->ibp_rate0); 2000 txrate = rt->info[rix].rateCode; 2001 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2002 txrate |= rt->info[rix].shortPreamble; 2003 sc->sc_txrix = rix; 2004 try0 = params->ibp_try0; 2005 ismrr = (params->ibp_try1 != 0); 2006 txantenna = params->ibp_pri >> 2; 2007 if (txantenna == 0) /* XXX? */ 2008 txantenna = sc->sc_txantenna; 2009 2010 /* 2011 * Since ctsrate is fixed, store it away for later 2012 * use when the descriptor fields are being set. 2013 */ 2014 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2015 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2016 2017 /* 2018 * NB: we mark all packets as type PSPOLL so the h/w won't 2019 * set the sequence number, duration, etc. 2020 */ 2021 atype = HAL_PKT_TYPE_PSPOLL; 2022 2023 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2024 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2025 sc->sc_hwmap[rix].ieeerate, -1); 2026 2027 if (ieee80211_radiotap_active_vap(vap)) { 2028 u_int64_t tsf = ath_hal_gettsf64(ah); 2029 2030 sc->sc_tx_th.wt_tsf = htole64(tsf); 2031 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2032 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 2033 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2034 if (m0->m_flags & M_FRAG) 2035 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2036 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2037 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 2038 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2039 2040 ieee80211_radiotap_tx(vap, m0); 2041 } 2042 2043 /* 2044 * Formulate first tx descriptor with tx controls. 2045 */ 2046 ds = bf->bf_desc; 2047 /* XXX check return value? 
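* (This looks like a leftover from when the HAL descriptor setup happened inline here; the descriptor is now populated at dispatch time, eg via ath_tx_setds(), so there is no return value to check at this point.)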
*/ 2048 2049 /* Store the decided rate index values away */ 2050 bf->bf_state.bfs_pktlen = pktlen; 2051 bf->bf_state.bfs_hdrlen = hdrlen; 2052 bf->bf_state.bfs_atype = atype; 2053 bf->bf_state.bfs_txpower = params->ibp_power; 2054 bf->bf_state.bfs_txrate0 = txrate; 2055 bf->bf_state.bfs_try0 = try0; 2056 bf->bf_state.bfs_keyix = keyix; 2057 bf->bf_state.bfs_txantenna = txantenna; 2058 bf->bf_state.bfs_txflags = flags; 2059 bf->bf_state.bfs_shpream = 2060 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2061 2062 /* Set local packet state, used to queue packets to hardware */ 2063 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2064 bf->bf_state.bfs_txq = sc->sc_ac2q[pri]; 2065 bf->bf_state.bfs_pri = pri; 2066 2067 /* XXX this should be done in ath_tx_setrate() */ 2068 bf->bf_state.bfs_ctsrate = 0; 2069 bf->bf_state.bfs_ctsduration = 0; 2070 bf->bf_state.bfs_ismrr = ismrr; 2071 2072 /* Blank the legacy rate array */ 2073 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2074 2075 bf->bf_state.bfs_rc[0].rix = 2076 ath_tx_findrix(sc, params->ibp_rate0); 2077 bf->bf_state.bfs_rc[0].tries = try0; 2078 bf->bf_state.bfs_rc[0].ratecode = txrate; 2079 2080 if (ismrr) { 2081 int rix; 2082 2083 rix = ath_tx_findrix(sc, params->ibp_rate1); 2084 bf->bf_state.bfs_rc[1].rix = rix; 2085 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2086 2087 rix = ath_tx_findrix(sc, params->ibp_rate2); 2088 bf->bf_state.bfs_rc[2].rix = rix; 2089 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2090 2091 rix = ath_tx_findrix(sc, params->ibp_rate3); 2092 bf->bf_state.bfs_rc[3].rix = rix; 2093 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2094 } 2095 /* 2096 * All the required rate control decisions have been made; 2097 * fill in the rc flags. 2098 */ 2099 ath_tx_rate_fill_rcflags(sc, bf); 2100 2101 /* NB: no buffered multicast in power save support */ 2102 2103 /* 2104 * If we're overriding the ADDBA destination, dump directly 2105 * into the hardware queue, right behind any frames 2106 * already pending to that node. 2107 */ 2108 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2109 __func__, do_override); 2110 2111 #if 1 2112 if (do_override) { 2113 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2114 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2115 } else { 2116 /* Queue to software queue */ 2117 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf); 2118 } 2119 #else 2120 /* Direct-dispatch to the hardware */ 2121 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2122 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2123 #endif 2124 ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]); 2125 2126 return 0; 2127 } 2128 2129 /* 2130 * Send a raw frame. 2131 * 2132 * This can be called by net80211. 2133 */ 2134 int 2135 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2136 const struct ieee80211_bpf_params *params) 2137 { 2138 struct ieee80211com *ic = ni->ni_ic; 2139 struct ifnet *ifp = ic->ic_ifp; 2140 struct ath_softc *sc = ifp->if_softc; 2141 struct ath_buf *bf; 2142 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2143 int error = 0; 2144 2145 ATH_PCU_LOCK(sc); 2146 if (sc->sc_inreset_cnt > 0) { 2147 device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n", 2148 __func__); 2149 error = EIO; 2150 ATH_PCU_UNLOCK(sc); 2151 goto bad0; 2152 } 2153 sc->sc_txstart_cnt++; 2154 ATH_PCU_UNLOCK(sc); 2155 2156 ATH_TX_LOCK(sc); 2157 2158 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 2159 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2160 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
2161 "!running" : "invalid"); 2162 m_freem(m); 2163 error = ENETDOWN; 2164 goto bad; 2165 } 2166 2167 /* 2168 * Enforce how deep the multicast queue can grow. 2169 * 2170 * XXX duplicated in ath_tx_start(). 2171 */ 2172 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2173 ATH_TXQ_LOCK(sc->sc_cabq); 2174 2175 if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) { 2176 sc->sc_stats.ast_tx_mcastq_overflow++; 2177 error = ENOBUFS; 2178 } 2179 2180 ATH_TXQ_UNLOCK(sc->sc_cabq); 2181 2182 if (error != 0) { 2183 m_freem(m); 2184 goto bad; 2185 } 2186 } 2187 2188 /* 2189 * Grab a TX buffer and associated resources. 2190 */ 2191 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2192 if (bf == NULL) { 2193 sc->sc_stats.ast_tx_nobuf++; 2194 m_freem(m); 2195 error = ENOBUFS; 2196 goto bad; 2197 } 2198 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2199 m, params, bf); 2200 2201 if (params == NULL) { 2202 /* 2203 * Legacy path; interpret frame contents to decide 2204 * precisely how to send the frame. 2205 */ 2206 if (ath_tx_start(sc, ni, bf, m)) { 2207 error = EIO; /* XXX */ 2208 goto bad2; 2209 } 2210 } else { 2211 /* 2212 * Caller supplied explicit parameters to use in 2213 * sending the frame. 2214 */ 2215 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2216 error = EIO; /* XXX */ 2217 goto bad2; 2218 } 2219 } 2220 sc->sc_wd_timer = 5; 2221 ifp->if_opackets++; 2222 sc->sc_stats.ast_tx_raw++; 2223 2224 /* 2225 * Update the TIM - if there's anything queued to the 2226 * software queue and power save is enabled, we should 2227 * set the TIM. 2228 */ 2229 ath_tx_update_tim(sc, ni, 1); 2230 2231 ATH_PCU_LOCK(sc); 2232 sc->sc_txstart_cnt--; 2233 ATH_PCU_UNLOCK(sc); 2234 2235 ATH_TX_UNLOCK(sc); 2236 2237 return 0; 2238 bad2: 2239 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2240 "bf=%p", 2241 m, 2242 params, 2243 bf); 2244 ATH_TXBUF_LOCK(sc); 2245 ath_returnbuf_head(sc, bf); 2246 ATH_TXBUF_UNLOCK(sc); 2247 bad: 2248 2249 ATH_TX_UNLOCK(sc); 2250 2251 ATH_PCU_LOCK(sc); 2252 sc->sc_txstart_cnt--; 2253 ATH_PCU_UNLOCK(sc); 2254 bad0: 2255 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2256 m, params); 2257 ifp->if_oerrors++; 2258 sc->sc_stats.ast_tx_raw_fail++; 2259 ieee80211_free_node(ni); 2260 2261 return error; 2262 } 2263 2264 /* Some helper functions */ 2265 2266 /* 2267 * ADDBA (and potentially others) need to be placed in the same 2268 * hardware queue as the TID/node it's relating to. This is so 2269 * it goes out after any pending non-aggregate frames to the 2270 * same node/TID. 2271 * 2272 * If this isn't done, the ADDBA can go out before the frames 2273 * queued in hardware. Even though these frames have a sequence 2274 * number -earlier- than the ADDBA can be transmitted (but 2275 * no frames whose sequence numbers are after the ADDBA should 2276 * be!) they'll arrive after the ADDBA - and the receiving end 2277 * will simply drop them as being out of the BAW. 2278 * 2279 * The frames can't be appended to the TID software queue - it'll 2280 * never be sent out. So these frames have to be directly 2281 * dispatched to the hardware, rather than queued in software. 2282 * So if this function returns true, the TXQ has to be 2283 * overridden and it has to be directly dispatched. 2284 * 2285 * It's a dirty hack, but someone's gotta do it. 2286 */ 2287 2288 /* 2289 * XXX doesn't belong here! 2290 */ 2291 static int 2292 ieee80211_is_action(struct ieee80211_frame *wh) 2293 { 2294 /* Type: Management frame? 
*/ 2295 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2296 IEEE80211_FC0_TYPE_MGT) 2297 return 0; 2298 2299 /* Subtype: Action frame? */ 2300 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2301 IEEE80211_FC0_SUBTYPE_ACTION) 2302 return 0; 2303 2304 return 1; 2305 } 2306 2307 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 2308 /* 2309 * Return an alternate TID for ADDBA request frames. 2310 * 2311 * Yes, this likely should be done in the net80211 layer. 2312 */ 2313 static int 2314 ath_tx_action_frame_override_queue(struct ath_softc *sc, 2315 struct ieee80211_node *ni, 2316 struct mbuf *m0, int *tid) 2317 { 2318 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2319 struct ieee80211_action_ba_addbarequest *ia; 2320 uint8_t *frm; 2321 uint16_t baparamset; 2322 2323 /* Not action frame? Bail */ 2324 if (! ieee80211_is_action(wh)) 2325 return 0; 2326 2327 /* XXX Not needed for frames we send? */ 2328 #if 0 2329 /* Correct length? */ 2330 if (! ieee80211_parse_action(ni, m)) 2331 return 0; 2332 #endif 2333 2334 /* Extract out action frame */ 2335 frm = (u_int8_t *)&wh[1]; 2336 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2337 2338 /* Not ADDBA? Bail */ 2339 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2340 return 0; 2341 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2342 return 0; 2343 2344 /* Extract TID, return it */ 2345 baparamset = le16toh(ia->rq_baparamset); 2346 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2347 2348 return 1; 2349 } 2350 #undef MS 2351 2352 /* Per-node software queue operations */ 2353 2354 /* 2355 * Add the current packet to the given BAW. 2356 * It is assumed that the current packet 2357 * 2358 * + fits inside the BAW; 2359 * + already has had a sequence number allocated. 2360 * 2361 * Since the BAW status may be modified by both the ath task and 2362 * the net80211/ifnet contexts, the TID must be locked. 2363 */ 2364 void 2365 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2366 struct ath_tid *tid, struct ath_buf *bf) 2367 { 2368 int index, cindex; 2369 struct ieee80211_tx_ampdu *tap; 2370 2371 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2372 ATH_TID_LOCK_ASSERT(sc, tid); 2373 2374 if (bf->bf_state.bfs_isretried) 2375 return; 2376 2377 tap = ath_tx_get_tx_tid(an, tid->tid); 2378 2379 if (! bf->bf_state.bfs_dobaw) { 2380 device_printf(sc->sc_dev, 2381 "%s: dobaw=0, seqno=%d, window %d:%d\n", 2382 __func__, 2383 SEQNO(bf->bf_state.bfs_seqno), 2384 tap->txa_start, 2385 tap->txa_wnd); 2386 } 2387 2388 if (bf->bf_state.bfs_addedbaw) 2389 device_printf(sc->sc_dev, 2390 "%s: re-added? tid=%d, seqno %d; window %d:%d; " 2391 "baw head=%d tail=%d\n", 2392 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2393 tap->txa_start, tap->txa_wnd, tid->baw_head, 2394 tid->baw_tail); 2395 2396 /* 2397 * Verify that the given sequence number is not outside of the 2398 * BAW. Complain loudly if that's the case. 2399 */ 2400 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2401 SEQNO(bf->bf_state.bfs_seqno))) { 2402 device_printf(sc->sc_dev, 2403 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2404 "baw head=%d tail=%d\n", 2405 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2406 tap->txa_start, tap->txa_wnd, tid->baw_head, 2407 tid->baw_tail); 2408 } 2409 2410 /* 2411 * ni->ni_txseqs[] is the currently allocated seqno. 2412 * the txa state contains the current baw start. 
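* For example (illustrative numbers): with txa_start at 100 and this frame's seqno at 103, ATH_BA_INDEX() yields index 3; if baw_head is 10 and ATH_TID_MAX_BUFS is 64, cindex is (10 + 3) & 63 = 13, so slot 13 of tx_buf[] tracks this frame.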
2413 */ 2414 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2415 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2416 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2417 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2418 "baw head=%d tail=%d\n", 2419 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2420 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2421 tid->baw_tail); 2422 2423 2424 #if 0 2425 assert(tid->tx_buf[cindex] == NULL); 2426 #endif 2427 if (tid->tx_buf[cindex] != NULL) { 2428 device_printf(sc->sc_dev, 2429 "%s: ba packet dup (index=%d, cindex=%d, " 2430 "head=%d, tail=%d)\n", 2431 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2432 device_printf(sc->sc_dev, 2433 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2434 __func__, 2435 tid->tx_buf[cindex], 2436 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2437 bf, 2438 SEQNO(bf->bf_state.bfs_seqno) 2439 ); 2440 } 2441 tid->tx_buf[cindex] = bf; 2442 2443 if (index >= ((tid->baw_tail - tid->baw_head) & 2444 (ATH_TID_MAX_BUFS - 1))) { 2445 tid->baw_tail = cindex; 2446 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2447 } 2448 } 2449 2450 /* 2451 * Flip the BAW buffer entry over from the existing one to the new one. 2452 * 2453 * When software retransmitting a (sub-)frame, it is entirely possible that 2454 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2455 * In that instance the buffer is cloned and the new buffer is used for 2456 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2457 * tracking array to maintain consistency. 2458 */ 2459 static void 2460 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2461 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2462 { 2463 int index, cindex; 2464 struct ieee80211_tx_ampdu *tap; 2465 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2466 2467 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2468 ATH_TID_LOCK_ASSERT(sc, tid); 2469 2470 tap = ath_tx_get_tx_tid(an, tid->tid); 2471 index = ATH_BA_INDEX(tap->txa_start, seqno); 2472 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2473 2474 /* 2475 * Just warn for now; if it happens then we should find out 2476 * about it. It's highly likely the aggregation session will 2477 * soon hang. 2478 */ 2479 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2480 device_printf(sc->sc_dev, "%s: retransmitted buffer" 2481 " has mismatched seqnos, BA session may hang.\n", 2482 __func__); 2483 device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n", 2484 __func__, 2485 old_bf->bf_state.bfs_seqno, 2486 new_bf->bf_state.bfs_seqno); 2487 } 2488 2489 if (tid->tx_buf[cindex] != old_bf) { 2490 device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; " 2491 "BA session may hang.\n", 2492 __func__); 2493 device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n", 2494 __func__, 2495 old_bf, new_bf); 2496 } 2497 2498 tid->tx_buf[cindex] = new_bf; 2499 } 2500 2501 /* 2502 * seq_start - left edge of BAW 2503 * seq_next - current/next sequence number to allocate 2504 * 2505 * Since the BAW status may be modified by both the ath task and 2506 * the net80211/ifnet contexts, the TID must be locked.
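* For example, if seqnos 100-102 are outstanding and 101 completes first, only its slot is cleared; txa_start stays at 100 until 100 itself completes, at which point the loop below slides the window (and baw_head) forward past every leading completed slot.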
2507 */ 2508 static void 2509 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2510 struct ath_tid *tid, const struct ath_buf *bf) 2511 { 2512 int index, cindex; 2513 struct ieee80211_tx_ampdu *tap; 2514 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2515 2516 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2517 ATH_TID_LOCK_ASSERT(sc, tid); 2518 2519 tap = ath_tx_get_tx_tid(an, tid->tid); 2520 index = ATH_BA_INDEX(tap->txa_start, seqno); 2521 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2522 2523 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2524 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2525 "baw head=%d, tail=%d\n", 2526 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2527 cindex, tid->baw_head, tid->baw_tail); 2528 2529 /* 2530 * If this occurs then we have a big problem - something else 2531 * has slid tap->txa_start along without updating the BAW 2532 * tracking start/end pointers. Thus the TX BAW state is now 2533 * completely busted. 2534 * 2535 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2536 * it's quite possible that a cloned buffer is making its way 2537 * here and causing it to fire off. Disable TDMA for now. 2538 */ 2539 if (tid->tx_buf[cindex] != bf) { 2540 device_printf(sc->sc_dev, 2541 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2542 __func__, 2543 bf, SEQNO(bf->bf_state.bfs_seqno), 2544 tid->tx_buf[cindex], 2545 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno)); 2546 } 2547 2548 tid->tx_buf[cindex] = NULL; 2549 2550 while (tid->baw_head != tid->baw_tail && 2551 !tid->tx_buf[tid->baw_head]) { 2552 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2553 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2554 } 2555 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2556 "%s: baw is now %d:%d, baw head=%d\n", 2557 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); 2558 } 2559 2560 /* 2561 * Mark the current node/TID as ready to TX. 2562 * 2563 * This is done to make it easy for the software scheduler to 2564 * find which nodes have data to send. 2565 * 2566 * The TXQ lock must be held. 2567 */ 2568 static void 2569 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2570 { 2571 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2572 2573 ATH_TXQ_LOCK_ASSERT(txq); 2574 2575 if (tid->paused) 2576 return; /* paused, can't schedule yet */ 2577 2578 if (tid->sched) 2579 return; /* already scheduled */ 2580 2581 tid->sched = 1; 2582 2583 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2584 } 2585 2586 /* 2587 * Mark the current node as no longer needing to be polled for 2588 * TX packets. 2589 * 2590 * The TXQ lock must be held. 2591 */ 2592 static void 2593 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2594 { 2595 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2596 2597 ATH_TXQ_LOCK_ASSERT(txq); 2598 2599 if (tid->sched == 0) 2600 return; 2601 2602 tid->sched = 0; 2603 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2604 } 2605 2606 /* 2607 * Assign a sequence number manually to the given frame. 2608 * 2609 * This should only be called for A-MPDU TX frames. 
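* The assigned seqno is written into the 802.11 header and also stored in the mbuf via M_SEQNO_SET(), so the BAW bookkeeping can retrieve it later.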
2610 */ 2611 static ieee80211_seq 2612 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2613 struct ath_buf *bf, struct mbuf *m0) 2614 { 2615 struct ieee80211_frame *wh; 2616 int tid, pri; 2617 ieee80211_seq seqno; 2618 uint8_t subtype; 2619 2620 /* TID lookup */ 2621 wh = mtod(m0, struct ieee80211_frame *); 2622 pri = M_WME_GETAC(m0); /* honor classification */ 2623 tid = WME_AC_TO_TID(pri); 2624 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", 2625 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2626 2627 /* XXX Is it a control frame? Ignore */ 2628 2629 /* Does the packet require a sequence number? */ 2630 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2631 return -1; 2632 2633 ATH_TID_LOCK_ASSERT(sc, &(ATH_NODE(ni)->an_tid[tid])); 2634 2635 /* 2636 * Is it a QOS NULL Data frame? Give it a sequence number from 2637 * the default TID (IEEE80211_NONQOS_TID.) 2638 * 2639 * The RX path of everything I've looked at doesn't include the NULL 2640 * data frame sequence number in the aggregation state updates, so 2641 * assigning it a sequence number there will cause a BAW hole on the 2642 * RX side. 2643 */ 2644 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2645 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2646 /* XXX no locking for this TID? This is a bit of a problem. */ 2647 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2648 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2649 } else { 2650 /* Manually assign sequence number */ 2651 seqno = ni->ni_txseqs[tid]; 2652 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2653 } 2654 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2655 M_SEQNO_SET(m0, seqno); 2656 2657 /* Return so caller can do something with it if needed */ 2658 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2659 return seqno; 2660 } 2661 2662 /* 2663 * Attempt to direct dispatch an aggregate frame to hardware. 2664 * If the frame is out of BAW, queue. 2665 * Otherwise, schedule it as a single frame. 2666 */ 2667 static void 2668 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2669 struct ath_txq *txq, struct ath_buf *bf) 2670 { 2671 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2672 // struct ath_txq *txq = bf->bf_state.bfs_txq; 2673 struct ieee80211_tx_ampdu *tap; 2674 2675 if (txq != bf->bf_state.bfs_txq) { 2676 device_printf(sc->sc_dev, "%s: txq %d != bfs_txq %d!\n", 2677 __func__, 2678 txq->axq_qnum, 2679 bf->bf_state.bfs_txq->axq_qnum); 2680 } 2681 2682 ATH_TXQ_LOCK_ASSERT(txq); 2683 ATH_TID_LOCK_ASSERT(sc, tid); 2684 2685 tap = ath_tx_get_tx_tid(an, tid->tid); 2686 2687 /* paused? queue */ 2688 if (tid->paused) { 2689 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2690 /* XXX don't sched - we're paused! */ 2691 return; 2692 } 2693 2694 /* outside baw? queue */ 2695 if (bf->bf_state.bfs_dobaw && 2696 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2697 SEQNO(bf->bf_state.bfs_seqno)))) { 2698 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2699 ath_tx_tid_sched(sc, tid); 2700 return; 2701 } 2702 2703 /* 2704 * This is a temporary check and should be removed once 2705 * all the relevant code paths have been fixed. 2706 * 2707 * During aggregate retries, it's possible that the head 2708 * frame will fail (which has the bfs_aggr and bfs_nframes 2709 * fields set for said aggregate) and will be retried as 2710 * a single frame. In this instance, the values should 2711 * be reset or the completion code will get upset with you. 
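* For example, the head frame of a four-subframe aggregate still carries bfs_nframes=4; if it's retried alone without the reset below, the completion path would try to account for three subframes that no longer exist.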
2712 */ 2713 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2714 device_printf(sc->sc_dev, "%s: bfs_aggr=%d, bfs_nframes=%d\n", 2715 __func__, 2716 bf->bf_state.bfs_aggr, 2717 bf->bf_state.bfs_nframes); 2718 bf->bf_state.bfs_aggr = 0; 2719 bf->bf_state.bfs_nframes = 1; 2720 } 2721 2722 /* Update CLRDMASK just before this frame is queued */ 2723 ath_tx_update_clrdmask(sc, tid, bf); 2724 2725 /* Direct dispatch to hardware */ 2726 ath_tx_do_ratelookup(sc, bf); 2727 ath_tx_calc_duration(sc, bf); 2728 ath_tx_calc_protection(sc, bf); 2729 ath_tx_set_rtscts(sc, bf); 2730 ath_tx_rate_fill_rcflags(sc, bf); 2731 ath_tx_setds(sc, bf); 2732 2733 /* Statistics */ 2734 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 2735 2736 /* Track per-TID hardware queue depth correctly */ 2737 tid->hwq_depth++; 2738 2739 /* Add to BAW */ 2740 if (bf->bf_state.bfs_dobaw) { 2741 ath_tx_addto_baw(sc, an, tid, bf); 2742 bf->bf_state.bfs_addedbaw = 1; 2743 } 2744 2745 /* Set completion handler, multi-frame aggregate or not */ 2746 bf->bf_comp = ath_tx_aggr_comp; 2747 2748 /* Hand off to hardware */ 2749 ath_tx_handoff(sc, txq, bf); 2750 } 2751 2752 /* 2753 * Attempt to send the packet. 2754 * If the queue isn't busy, direct-dispatch. 2755 * If the queue is busy enough, queue the given packet on the 2756 * relevant software queue. 2757 */ 2758 void 2759 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, 2760 struct ath_buf *bf) 2761 { 2762 struct ath_node *an = ATH_NODE(ni); 2763 struct ieee80211_frame *wh; 2764 struct ath_tid *atid; 2765 int pri, tid; 2766 struct mbuf *m0 = bf->bf_m; 2767 2768 ATH_TXQ_LOCK_ASSERT(txq); 2769 2770 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 2771 wh = mtod(m0, struct ieee80211_frame *); 2772 pri = ath_tx_getac(sc, m0); 2773 tid = ath_tx_gettid(sc, m0); 2774 atid = &an->an_tid[tid]; 2775 2776 ATH_TID_LOCK_ASSERT(sc, atid); 2777 2778 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", 2779 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2780 2781 /* Set local packet state, used to queue packets to hardware */ 2782 /* XXX potentially duplicate info, re-check */ 2783 /* XXX remember, txq must be the hardware queue, not the av_mcastq */ 2784 bf->bf_state.bfs_tid = tid; 2785 bf->bf_state.bfs_txq = txq; 2786 bf->bf_state.bfs_pri = pri; 2787 2788 /* 2789 * If the hardware queue isn't busy, direct-dispatch it. 2790 * If the hardware queue is busy, software queue it. 2791 * If the TID is paused or the traffic is outside the BAW, software 2792 * queue it. 2793 */ 2794 if (atid->paused) { 2795 /* TID is paused, queue */ 2796 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); 2797 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2798 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 2799 /* AMPDU pending; queue */ 2800 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); 2801 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2802 /* XXX sched? */ 2803 } else if (ath_tx_ampdu_running(sc, an, tid)) { 2804 /* AMPDU running, attempt direct dispatch if possible */ 2805 2806 /* 2807 * Always queue the frame to the tail of the list. 2808 */ 2809 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2810 2811 /* 2812 * If the hardware queue isn't busy, direct dispatch 2813 * the head frame in the list. Don't schedule the 2814 * TID - let it build some more frames first? 2815 * 2816 * Otherwise, schedule the TID.
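* Note it's the head of the software queue that gets dispatched below, not necessarily the frame just queued; this keeps the in-BAW ordering intact.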
2817 */ 2818 if (txq->axq_depth < sc->sc_hwq_limit) { 2819 bf = ATH_TID_FIRST(atid); 2820 ATH_TID_REMOVE(atid, bf, bf_list); 2821 2822 /* 2823 * Ensure it's definitely treated as a non-AMPDU 2824 * frame - this information may have been left 2825 * over from a previous attempt. 2826 */ 2827 bf->bf_state.bfs_aggr = 0; 2828 bf->bf_state.bfs_nframes = 1; 2829 2830 /* Queue to the hardware */ 2831 ath_tx_xmit_aggr(sc, an, txq, bf); 2832 DPRINTF(sc, ATH_DEBUG_SW_TX, 2833 "%s: xmit_aggr\n", 2834 __func__); 2835 } else { 2836 DPRINTF(sc, ATH_DEBUG_SW_TX, 2837 "%s: ampdu; swq'ing\n", 2838 __func__); 2839 2840 ath_tx_tid_sched(sc, atid); 2841 } 2842 } else if (txq->axq_depth < sc->sc_hwq_limit) { 2843 /* AMPDU not running, attempt direct dispatch */ 2844 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); 2845 /* See if clrdmask needs to be set */ 2846 ath_tx_update_clrdmask(sc, atid, bf); 2847 ath_tx_xmit_normal(sc, txq, bf); 2848 } else { 2849 /* Busy; queue */ 2850 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); 2851 ATH_TID_INSERT_TAIL(atid, bf, bf_list); 2852 ath_tx_tid_sched(sc, atid); 2853 } 2854 } 2855 2856 /* 2857 * Configure the per-TID node state. 2858 * 2859 * This likely belongs in if_ath_node.c but I can't think of anywhere 2860 * else to put it just yet. 2861 * 2862 * This sets up the SLISTs and the mutex as appropriate. 2863 */ 2864 void 2865 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 2866 { 2867 int i, j; 2868 struct ath_tid *atid; 2869 2870 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2871 atid = &an->an_tid[i]; 2872 2873 /* XXX now with this bzero(), is the field 0'ing needed? */ 2874 bzero(atid, sizeof(*atid)); 2875 2876 TAILQ_INIT(&atid->tid_q); 2877 TAILQ_INIT(&atid->filtq.tid_q); 2878 atid->tid = i; 2879 atid->an = an; 2880 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 2881 atid->tx_buf[j] = NULL; 2882 atid->baw_head = atid->baw_tail = 0; 2883 atid->paused = 0; 2884 atid->sched = 0; 2885 atid->hwq_depth = 0; 2886 atid->cleanup_inprogress = 0; 2887 atid->clrdmask = 1; /* Always start by setting this bit */ 2888 if (i == IEEE80211_NONQOS_TID) 2889 atid->ac = ATH_NONQOS_TID_AC; 2890 else 2891 atid->ac = TID_TO_WME_AC(i); 2892 } 2893 } 2894 2895 /* 2896 * Pause the current TID. This stops packets from being transmitted 2897 * on it. 2898 * 2899 * Since this is called from upper layers as well as the driver, 2900 * it will get the TID lock. 2901 */ 2902 static void 2903 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 2904 { 2905 2906 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2907 tid->paused++; 2908 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 2909 __func__, tid->paused); 2910 } 2911 2912 /* 2913 * Unpause the current TID, and schedule it if needed. 2914 */ 2915 static void 2916 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 2917 { 2918 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2919 2920 tid->paused--; 2921 2922 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 2923 __func__, tid->paused); 2924 2925 if (tid->paused) 2926 return; 2927 2928 /* 2929 * Override the clrdmask configuration for the next frame 2930 * from this TID, just to get the ball rolling.
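* (Setting clrdmask makes the next TX descriptor clear the hardware destination-mask filter, so transmission to the remote node can resume after a filtered-frame episode.)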
2931 */ 2932 tid->clrdmask = 1; 2933 2934 if (tid->axq_depth == 0) 2935 return; 2936 2937 /* XXX isfiltered should never be 1 at this point */ 2938 if (tid->isfiltered == 1) { 2939 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 2940 return; 2941 } 2942 2943 ath_tx_tid_sched(sc, tid); 2944 /* Punt some frames to the hardware if needed */ 2945 //ath_txq_sched(sc, sc->sc_ac2q[tid->ac]); 2946 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 2947 } 2948 2949 /* 2950 * Add the given ath_buf to the TID filtered frame list. 2951 * This requires the TID be filtered. 2952 */ 2953 static void 2954 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 2955 struct ath_buf *bf) 2956 { 2957 2958 ATH_TID_LOCK_ASSERT(sc, tid); 2959 if (! tid->isfiltered) 2960 device_printf(sc->sc_dev, "%s: not filtered?!\n", __func__); 2961 2962 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 2963 2964 /* Set the retry bit and bump the retry counter */ 2965 ath_tx_set_retry(sc, bf); 2966 sc->sc_stats.ast_tx_swfiltered++; 2967 2968 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 2969 } 2970 2971 /* 2972 * Handle a completed filtered frame from the given TID. 2973 * This just enables/pauses the filtered frame state if required 2974 * and appends the filtered frame to the filtered queue. 2975 */ 2976 static void 2977 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 2978 struct ath_buf *bf) 2979 { 2980 2981 ATH_TID_LOCK_ASSERT(sc, tid); 2982 2983 if (! tid->isfiltered) { 2984 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 2985 __func__); 2986 tid->isfiltered = 1; 2987 ath_tx_tid_pause(sc, tid); 2988 } 2989 2990 /* Add the frame to the filter queue */ 2991 ath_tx_tid_filt_addbuf(sc, tid, bf); 2992 } 2993 2994 /* 2995 * Complete the filtered frame TX completion. 2996 * 2997 * If there are no more frames in the hardware queue, unpause/unfilter 2998 * the TID if applicable. Otherwise we will wait for a node PS transition 2999 * to unfilter. 3000 */ 3001 static void 3002 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3003 { 3004 struct ath_buf *bf; 3005 3006 ATH_TID_LOCK_ASSERT(sc, tid); 3007 3008 if (tid->hwq_depth != 0) 3009 return; 3010 3011 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3012 __func__); 3013 tid->isfiltered = 0; 3014 tid->clrdmask = 1; 3015 3016 /* XXX this is really quite inefficient */ 3017 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3018 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3019 ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3020 } 3021 3022 ath_tx_tid_resume(sc, tid); 3023 } 3024 3025 /* 3026 * Called when a single (aggregate or otherwise) frame is completed. 3027 * 3028 * Returns 0 if the buffer could be added to the filtered list 3029 * (cloned or otherwise), 1 if the buffer couldn't be added to the 3030 * filtered list (failed clone; expired retry) and the caller should 3031 * free it and handle it like a failure (eg by sending a BAR.) 3032 */ 3033 static int 3034 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3035 struct ath_buf *bf) 3036 { 3037 struct ath_buf *nbf; 3038 int retval; 3039 3040 ATH_TID_LOCK_ASSERT(sc, tid); 3041 3042 /* 3043 * Don't allow a filtered frame to live forever.
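* ie, once bfs_retries exceeds SWMAX_RETRIES the frame is handed back to the caller to be failed (typically by scheduling a BAR).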
3044 */ 3045 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3046 sc->sc_stats.ast_tx_swretrymax++; 3047 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3048 "%s: bf=%p, seqno=%d, exceeded retries\n", 3049 __func__, 3050 bf, 3051 bf->bf_state.bfs_seqno); 3052 return (1); 3053 } 3054 3055 /* 3056 * A busy buffer can't be added to the retry list. 3057 * It needs to be cloned. 3058 */ 3059 if (bf->bf_flags & ATH_BUF_BUSY) { 3060 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3061 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3062 "%s: busy buffer clone: %p -> %p\n", 3063 __func__, bf, nbf); 3064 } else { 3065 nbf = bf; 3066 } 3067 3068 if (nbf == NULL) { 3069 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3070 "%s: busy buffer couldn't be cloned (%p)!\n", 3071 __func__, bf); 3072 retval = 1; 3073 } else { 3074 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3075 retval = 0; 3076 } 3077 ath_tx_tid_filt_comp_complete(sc, tid); 3078 3079 return (retval); 3080 } 3081 3082 static void 3083 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3084 struct ath_buf *bf_first, ath_bufhead *bf_q) 3085 { 3086 struct ath_buf *bf, *bf_next, *nbf; 3087 3088 ATH_TID_LOCK_ASSERT(sc, tid); 3089 3090 bf = bf_first; 3091 while (bf) { 3092 bf_next = bf->bf_next; 3093 bf->bf_next = NULL; /* Remove it from the aggr list */ 3094 3095 /* 3096 * Don't allow a filtered frame to live forever. 3097 */ 3098 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3099 sc->sc_stats.ast_tx_swretrymax++; 3100 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3101 "%s: bf=%p, seqno=%d, exceeded retries\n", 3102 __func__, 3103 bf, 3104 bf->bf_state.bfs_seqno); 3105 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3106 goto next; 3107 } 3108 3109 if (bf->bf_flags & ATH_BUF_BUSY) { 3110 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3111 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3112 "%s: busy buffer cloned: %p -> %p\n", 3113 __func__, bf, nbf); 3114 } else { 3115 nbf = bf; 3116 } 3117 3118 /* 3119 * If the buffer couldn't be cloned, add it to bf_q; 3120 * the caller will free the buffer(s) as required. 3121 */ 3122 if (nbf == NULL) { 3123 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3124 "%s: buffer couldn't be cloned! (%p)\n", 3125 __func__, bf); 3126 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3127 } else { 3128 ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3129 } 3130 next: 3131 bf = bf_next; 3132 } 3133 3134 ath_tx_tid_filt_comp_complete(sc, tid); 3135 } 3136 3137 /* 3138 * Suspend the queue because we need to TX a BAR. 3139 */ 3140 static void 3141 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3142 { 3143 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 3144 3145 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3146 "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n", 3147 __func__, 3148 tid, 3149 tid->bar_wait, 3150 tid->bar_tx); 3151 3152 /* We shouldn't be called when bar_tx is 1 */ 3153 if (tid->bar_tx) { 3154 device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n", 3155 __func__); 3156 } 3157 3158 /* If we've already been called, just be patient. */ 3159 if (tid->bar_wait) 3160 return; 3161 3162 /* Wait! */ 3163 tid->bar_wait = 1; 3164 3165 /* Only one pause, no matter how many frames fail */ 3166 ath_tx_tid_pause(sc, tid); 3167 } 3168 3169 /* 3170 * We've finished with BAR handling - either we succeeded or 3171 * failed. Either way, unsuspend TX.
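* This undoes the single pause taken in ath_tx_tid_bar_suspend(): bar_wait/bar_tx are cleared and the TID is resumed.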
3172 */ 3173 static void 3174 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3175 { 3176 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 3177 3178 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3179 "%s: tid=%p, called\n", 3180 __func__, 3181 tid); 3182 3183 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3184 device_printf(sc->sc_dev, "%s: bar_tx=%d, bar_wait=%d: ?\n", 3185 __func__, tid->bar_tx, tid->bar_wait); 3186 } 3187 3188 tid->bar_tx = tid->bar_wait = 0; 3189 ath_tx_tid_resume(sc, tid); 3190 } 3191 3192 /* 3193 * Return whether we're ready to TX a BAR frame. 3194 * 3195 * Requires the TID lock be held. 3196 */ 3197 static int 3198 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3199 { 3200 3201 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 3202 3203 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3204 return (0); 3205 3206 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%p (%d), bar ready\n", 3207 __func__, tid, tid->tid); 3208 3209 return (1); 3210 } 3211 3212 /* 3213 * Check whether the current TID is ready to have a BAR 3214 * TXed and if so, do the TX. 3215 * 3216 * Since the TID/TXQ lock can't be held during a call to 3217 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3218 * sending the BAR and locking it again. 3219 * 3220 * Eventually, the code to send the BAR should be broken out 3221 * from this routine so the lock doesn't have to be reacquired 3222 * just to be immediately dropped by the caller. 3223 */ 3224 static void 3225 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3226 { 3227 struct ieee80211_tx_ampdu *tap; 3228 3229 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 3230 3231 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3232 "%s: tid=%p, called\n", 3233 __func__, 3234 tid); 3235 3236 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3237 3238 /* 3239 * This is an error condition! 3240 */ 3241 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3242 device_printf(sc->sc_dev, 3243 "%s: tid=%p, bar_tx=%d, bar_wait=%d: ?\n", 3244 __func__, 3245 tid, 3246 tid->bar_tx, 3247 tid->bar_wait); 3248 return; 3249 } 3250 3251 /* Don't do anything if we still have pending frames */ 3252 if (tid->hwq_depth > 0) { 3253 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3254 "%s: tid=%p, hwq_depth=%d, waiting\n", 3255 __func__, 3256 tid, 3257 tid->hwq_depth); 3258 return; 3259 } 3260 3261 /* We're now about to TX */ 3262 tid->bar_tx = 1; 3263 3264 /* 3265 * Override the clrdmask configuration for the next frame, 3266 * just to get the ball rolling. 3267 */ 3268 tid->clrdmask = 1; 3269 3270 /* 3271 * Calculate new BAW left edge, now that all frames have either 3272 * succeeded or failed. 3273 * 3274 * XXX verify this is _actually_ the valid value to begin at! 3275 */ 3276 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3277 "%s: tid=%p, new BAW left edge=%d\n", 3278 __func__, 3279 tid, 3280 tap->txa_start); 3281 3282 /* Try sending the BAR frame */ 3283 /* We can't hold the lock here! */ 3284 3285 ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]); 3286 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3287 /* Success? Now we wait for notification that it's done */ 3288 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]); 3289 return; 3290 } 3291 3292 /* Failure? 
For now, warn loudly and continue */ 3293 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]); 3294 device_printf(sc->sc_dev, "%s: tid=%p, failed to TX BAR, continue!\n", 3295 __func__, tid); 3296 ath_tx_tid_bar_unsuspend(sc, tid); 3297 } 3298 3299 static void 3300 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3301 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3302 { 3303 3304 ATH_TID_LOCK_ASSERT(sc, tid); 3305 3306 /* 3307 * If the current TID is running AMPDU, update 3308 * the BAW. 3309 */ 3310 if (ath_tx_ampdu_running(sc, an, tid->tid) && 3311 bf->bf_state.bfs_dobaw) { 3312 /* 3313 * Only remove the frame from the BAW if it's 3314 * been transmitted at least once; this means 3315 * the frame was in the BAW to begin with. 3316 */ 3317 if (bf->bf_state.bfs_retries > 0) { 3318 ath_tx_update_baw(sc, an, tid, bf); 3319 bf->bf_state.bfs_dobaw = 0; 3320 } 3321 /* 3322 * This has become a non-fatal error now 3323 */ 3324 if (! bf->bf_state.bfs_addedbaw) 3325 device_printf(sc->sc_dev, 3326 "%s: wasn't added: seqno %d\n", 3327 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3328 } 3329 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3330 } 3331 3332 static void 3333 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3334 const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3335 { 3336 struct ieee80211_node *ni = &an->an_node; 3337 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 3338 struct ieee80211_tx_ampdu *tap; 3339 3340 tap = ath_tx_get_tx_tid(an, tid->tid); 3341 3342 device_printf(sc->sc_dev, 3343 "%s: %s: node %p: bf=%p: addbaw=%d, dobaw=%d, " 3344 "seqno=%d, retry=%d\n", 3345 __func__, pfx, ni, bf, 3346 bf->bf_state.bfs_addedbaw, 3347 bf->bf_state.bfs_dobaw, 3348 SEQNO(bf->bf_state.bfs_seqno), 3349 bf->bf_state.bfs_retries); 3350 device_printf(sc->sc_dev, 3351 "%s: node %p: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3352 __func__, ni, bf, 3353 txq->axq_qnum, 3354 txq->axq_depth, 3355 txq->axq_aggr_depth); 3356 3357 device_printf(sc->sc_dev, 3358 "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, isfiltered=%d\n", 3359 __func__, ni, bf, 3360 tid->axq_depth, 3361 tid->hwq_depth, 3362 tid->bar_wait, 3363 tid->isfiltered); 3364 device_printf(sc->sc_dev, 3365 "%s: node %p: tid %d: " 3366 "sched=%d, paused=%d, " 3367 "incomp=%d, baw_head=%d, " 3368 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3369 __func__, ni, tid->tid, 3370 tid->sched, tid->paused, 3371 tid->incomp, tid->baw_head, 3372 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3373 ni->ni_txseqs[tid->tid]); 3374 3375 /* XXX Dump the frame, see what it is? */ 3376 ieee80211_dump_pkt(ni->ni_ic, 3377 mtod(bf->bf_m, const uint8_t *), 3378 bf->bf_m->m_len, 0, -1); 3379 } 3380 3381 /* 3382 * Free any packets currently pending in the software TX queue. 3383 * 3384 * This will be called when a node is being deleted. 3385 * 3386 * It can also be called on an active node during an interface 3387 * reset or state transition. 3388 * 3389 * (From Linux/reference): 3390 * 3391 * TODO: For frame(s) that are in the retry state, we will reuse the 3392 * sequence number(s) without setting the retry bit. The 3393 * alternative is to give up on these and BAR the receiver's window 3394 * forward. 
3395 */ 3396 static void 3397 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 3398 struct ath_tid *tid, ath_bufhead *bf_cq) 3399 { 3400 struct ath_buf *bf; 3401 struct ieee80211_tx_ampdu *tap; 3402 struct ieee80211_node *ni = &an->an_node; 3403 int t; 3404 3405 tap = ath_tx_get_tx_tid(an, tid->tid); 3406 3407 ATH_TID_LOCK_ASSERT(sc, tid); 3408 3409 /* Walk the queue, free frames */ 3410 t = 0; 3411 for (;;) { 3412 bf = ATH_TID_FIRST(tid); 3413 if (bf == NULL) { 3414 break; 3415 } 3416 3417 if (t == 0) { 3418 ath_tx_tid_drain_print(sc, an, "norm", tid, bf); 3419 t = 1; 3420 } 3421 3422 ATH_TID_REMOVE(tid, bf, bf_list); 3423 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3424 } 3425 3426 /* And now, drain the filtered frame queue */ 3427 t = 0; 3428 for (;;) { 3429 bf = ATH_TID_FILT_FIRST(tid); 3430 if (bf == NULL) 3431 break; 3432 3433 if (t == 0) { 3434 ath_tx_tid_drain_print(sc, an, "filt", tid, bf); 3435 t = 1; 3436 } 3437 3438 ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3439 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); 3440 } 3441 3442 /* 3443 * Override the clrdmask configuration for the next frame 3444 * in case there is some future transmission, just to get 3445 * the ball rolling. 3446 * 3447 * This won't hurt things if the TID is about to be freed. 3448 */ 3449 tid->clrdmask = 1; 3450 3451 /* 3452 * Now that it's completed, grab the TID lock and update 3453 * the sequence number and BAW window. 3454 * Because sequence numbers have been assigned to frames 3455 * that haven't been sent yet, it's entirely possible 3456 * we'll be called with some pending frames that have not 3457 * been transmitted. 3458 * 3459 * The cleaner solution is to do the sequence number allocation 3460 * when the packet is first transmitted - and thus the "retries" 3461 * check above would be enough to update the BAW/seqno. 3462 */ 3463 3464 /* But don't do it for non-QoS TIDs */ 3465 if (tap) { 3466 #if 0 3467 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3468 "%s: node %p: TID %d: sliding BAW left edge to %d\n", 3469 __func__, an, tid->tid, tap->txa_start); 3470 #endif 3471 ni->ni_txseqs[tid->tid] = tap->txa_start; 3472 tid->baw_tail = tid->baw_head; 3473 } 3474 } 3475 3476 /* 3477 * Flush all software queued packets for the given node. 3478 * 3479 * This occurs when a completion handler frees the last buffer 3480 * for a node, and the node is thus freed. This causes the node 3481 * to be cleaned up, which ends up calling ath_tx_node_flush. 3482 */ 3483 void 3484 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 3485 { 3486 int tid; 3487 ath_bufhead bf_cq; 3488 struct ath_buf *bf; 3489 3490 TAILQ_INIT(&bf_cq); 3491 3492 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 3493 &an->an_node); 3494 3495 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 3496 struct ath_tid *atid = &an->an_tid[tid]; 3497 struct ath_txq *txq = sc->sc_ac2q[atid->ac]; 3498 3499 ATH_TXQ_LOCK(txq); 3500 /* Free packets */ 3501 ath_tx_tid_drain(sc, an, atid, &bf_cq); 3502 /* Remove this tid from the list of active tids */ 3503 ath_tx_tid_unsched(sc, atid); 3504 ATH_TXQ_UNLOCK(txq); 3505 } 3506 3507 /* Handle completed frames */ 3508 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3509 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3510 ath_tx_default_comp(sc, bf, 0); 3511 } 3512 } 3513 3514 /* 3515 * Drain all the software TXQs currently with traffic queued. 
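* (In practice: walk the list of TIDs currently scheduled on the given hardware TXQ, draining and unscheduling each in turn.)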
3516 */ 3517 void 3518 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 3519 { 3520 struct ath_tid *tid; 3521 ath_bufhead bf_cq; 3522 struct ath_buf *bf; 3523 3524 TAILQ_INIT(&bf_cq); 3525 ATH_TXQ_LOCK(txq); 3526 3527 /* 3528 * Iterate over all active tids for the given txq, 3529 * flushing and unsched'ing them 3530 */ 3531 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 3532 tid = TAILQ_FIRST(&txq->axq_tidq); 3533 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 3534 ath_tx_tid_unsched(sc, tid); 3535 } 3536 3537 ATH_TXQ_UNLOCK(txq); 3538 3539 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3540 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3541 ath_tx_default_comp(sc, bf, 0); 3542 } 3543 } 3544 3545 /* 3546 * Handle completion of non-aggregate session frames. 3547 * 3548 * This (currently) doesn't implement software retransmission of 3549 * non-aggregate frames! 3550 * 3551 * Software retransmission of non-aggregate frames needs to obey 3552 * the strict sequence number ordering, and drop any frames that 3553 * will fail this. 3554 * 3555 * For now, filtered frames and frame retransmission will cause 3556 * all kinds of issues. So we don't support them. 3557 * 3558 * So anyone queuing frames via ath_tx_normal_xmit() or 3559 * ath_tx_hw_queue_norm() must override and set CLRDMASK. 3560 */ 3561 void 3562 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3563 { 3564 struct ieee80211_node *ni = bf->bf_node; 3565 struct ath_node *an = ATH_NODE(ni); 3566 int tid = bf->bf_state.bfs_tid; 3567 struct ath_tid *atid = &an->an_tid[tid]; 3568 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3569 3570 /* The TID state is protected behind the TXQ lock */ 3571 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3572 3573 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 3574 __func__, bf, fail, atid->hwq_depth - 1); 3575 3576 atid->hwq_depth--; 3577 3578 #if 0 3579 /* 3580 * If the frame was filtered, stick it on the filter frame 3581 * queue and complain about it. It shouldn't happen! 3582 */ 3583 if ((ts->ts_status & HAL_TXERR_FILT) || 3584 (ts->ts_status != 0 && atid->isfiltered)) { 3585 device_printf(sc->sc_dev, 3586 "%s: isfiltered=%d, ts_status=%d: huh?\n", 3587 __func__, 3588 atid->isfiltered, 3589 ts->ts_status); 3590 ath_tx_tid_filt_comp_buf(sc, atid, bf); 3591 } 3592 #endif 3593 if (atid->isfiltered) 3594 device_printf(sc->sc_dev, "%s: filtered?!\n", __func__); 3595 if (atid->hwq_depth < 0) 3596 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3597 __func__, atid->hwq_depth); 3598 3599 /* 3600 * If the queue is filtered, potentially mark it as complete 3601 * and reschedule it as needed. 3602 * 3603 * This is required as there may be a subsequent TX descriptor 3604 * for this end-node that has CLRDMASK set, so it's quite possible 3605 * that a filtered frame will be followed by a non-filtered 3606 * (complete or otherwise) frame. 3607 * 3608 * XXX should we do this before we complete the frame? 3609 */ 3610 if (atid->isfiltered) 3611 ath_tx_tid_filt_comp_complete(sc, atid); 3612 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3613 3614 /* 3615 * Punt to rate control if we're not being cleaned up 3616 * during a hw queue drain and the frame wanted an ACK. 3617 */ 3618 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3619 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3620 ts, bf->bf_state.bfs_pktlen, 3621 1, (ts->ts_status == 0) ?
0 : 1); 3622 3623 ath_tx_default_comp(sc, bf, fail); 3624 } 3625 3626 /* 3627 * Handle cleanup of aggregate session packets that aren't 3628 * an A-MPDU. 3629 * 3630 * There's no need to update the BAW here - the session is being 3631 * torn down. 3632 */ 3633 static void 3634 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3635 { 3636 struct ieee80211_node *ni = bf->bf_node; 3637 struct ath_node *an = ATH_NODE(ni); 3638 int tid = bf->bf_state.bfs_tid; 3639 struct ath_tid *atid = &an->an_tid[tid]; 3640 3641 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 3642 __func__, tid, atid->incomp); 3643 3644 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3645 atid->incomp--; 3646 if (atid->incomp == 0) { 3647 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3648 "%s: TID %d: cleaned up! resume!\n", 3649 __func__, tid); 3650 atid->cleanup_inprogress = 0; 3651 ath_tx_tid_resume(sc, atid); 3652 } 3653 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3654 3655 ath_tx_default_comp(sc, bf, 0); 3656 } 3657 3658 /* 3659 * Performs transmit side cleanup when TID changes from aggregated to 3660 * unaggregated. 3661 * 3662 * - Discard all retry frames from the s/w queue. 3663 * - Fix the tx completion function for all buffers in s/w queue. 3664 * - Count the number of unacked frames, and let transmit completion 3665 * handle it later. 3666 * 3667 * The caller is responsible for pausing the TID. 3668 */ 3669 static void 3670 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid) 3671 { 3672 struct ath_tid *atid = &an->an_tid[tid]; 3673 struct ieee80211_tx_ampdu *tap; 3674 struct ath_buf *bf, *bf_next; 3675 ath_bufhead bf_cq; 3676 3677 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3678 "%s: TID %d: called\n", __func__, tid); 3679 3680 TAILQ_INIT(&bf_cq); 3681 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3682 3683 /* 3684 * Move the filtered frames to the TX queue, before 3685 * we run off and discard/process things. 3686 */ 3687 /* XXX this is really quite inefficient */ 3688 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { 3689 ATH_TID_FILT_REMOVE(atid, bf, bf_list); 3690 ATH_TID_INSERT_HEAD(atid, bf, bf_list); 3691 } 3692 3693 /* 3694 * Update the frames in the software TX queue: 3695 * 3696 * + Discard retry frames in the queue 3697 * + Fix the completion function to be non-aggregate 3698 */ 3699 bf = ATH_TID_FIRST(atid); 3700 while (bf) { 3701 if (bf->bf_state.bfs_isretried) { 3702 bf_next = TAILQ_NEXT(bf, bf_list); 3703 ATH_TID_REMOVE(atid, bf, bf_list); 3704 atid->axq_depth--; 3705 if (bf->bf_state.bfs_dobaw) { 3706 ath_tx_update_baw(sc, an, atid, bf); 3707 if (! bf->bf_state.bfs_addedbaw) 3708 device_printf(sc->sc_dev, 3709 "%s: wasn't added: seqno %d\n", 3710 __func__, 3711 SEQNO(bf->bf_state.bfs_seqno)); 3712 } 3713 bf->bf_state.bfs_dobaw = 0; 3714 /* 3715 * Call the default completion handler with "fail" just 3716 * so upper levels are suitably notified about this. 3717 */ 3718 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3719 bf = bf_next; 3720 continue; 3721 } 3722 /* Give these the default completion handler */ 3723 bf->bf_comp = ath_tx_normal_comp; 3724 bf = TAILQ_NEXT(bf, bf_list); 3725 } 3726 3727 /* The caller is required to pause the TID */ 3728 #if 0 3729 /* Pause the TID */ 3730 ath_tx_tid_pause(sc, atid); 3731 #endif 3732 3733 /* 3734 * Calculate what hardware-queued frames exist based 3735 * on the current BAW size. Ie, what frames have been 3736 * added to the TX hardware queue for this TID but 3737 * not yet ACKed. 
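* Each non-NULL tx_buf[] slot between baw_head and baw_tail is one such in-flight frame; the loop below counts them in incomp and clears the slots.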

/*
 * Handle cleanup of aggregate session packets that aren't
 * an A-MPDU.
 *
 * There's no need to update the BAW here - the session is being
 * torn down.
 */
static void
ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
    struct ieee80211_node *ni = bf->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    int tid = bf->bf_state.bfs_tid;
    struct ath_tid *atid = &an->an_tid[tid];

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
        __func__, tid, atid->incomp);

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
    atid->incomp--;
    if (atid->incomp == 0) {
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: TID %d: cleaned up! resume!\n",
            __func__, tid);
        atid->cleanup_inprogress = 0;
        ath_tx_tid_resume(sc, atid);
    }
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    ath_tx_default_comp(sc, bf, 0);
}

/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 *
 * - Discard all retry frames from the s/w queue.
 * - Fix the tx completion function for all buffers in s/w queue.
 * - Count the number of unacked frames, and let transmit completion
 *   handle it later.
 *
 * The caller is responsible for pausing the TID.
 */
static void
ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid)
{
    struct ath_tid *atid = &an->an_tid[tid];
    struct ieee80211_tx_ampdu *tap;
    struct ath_buf *bf, *bf_next;
    ath_bufhead bf_cq;

    DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
        "%s: TID %d: called\n", __func__, tid);

    TAILQ_INIT(&bf_cq);
    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

    /*
     * Move the filtered frames to the TX queue, before
     * we run off and discard/process things.
     */
    /* XXX this is really quite inefficient */
    while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
        ATH_TID_FILT_REMOVE(atid, bf, bf_list);
        ATH_TID_INSERT_HEAD(atid, bf, bf_list);
    }

    /*
     * Update the frames in the software TX queue:
     *
     * + Discard retry frames in the queue
     * + Fix the completion function to be non-aggregate
     */
    bf = ATH_TID_FIRST(atid);
    while (bf) {
        if (bf->bf_state.bfs_isretried) {
            bf_next = TAILQ_NEXT(bf, bf_list);
            ATH_TID_REMOVE(atid, bf, bf_list);
            atid->axq_depth--;
            if (bf->bf_state.bfs_dobaw) {
                ath_tx_update_baw(sc, an, atid, bf);
                if (! bf->bf_state.bfs_addedbaw)
                    device_printf(sc->sc_dev,
                        "%s: wasn't added: seqno %d\n",
                        __func__,
                        SEQNO(bf->bf_state.bfs_seqno));
            }
            bf->bf_state.bfs_dobaw = 0;
            /*
             * Call the default completion handler with "fail" just
             * so upper levels are suitably notified about this.
             */
            TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
            bf = bf_next;
            continue;
        }
        /* Give these the default completion handler */
        bf->bf_comp = ath_tx_normal_comp;
        bf = TAILQ_NEXT(bf, bf_list);
    }

    /* The caller is required to pause the TID */
#if 0
    /* Pause the TID */
    ath_tx_tid_pause(sc, atid);
#endif

    /*
     * Calculate what hardware-queued frames exist based
     * on the current BAW size. Ie, what frames have been
     * added to the TX hardware queue for this TID but
     * not yet ACKed.
     */
    tap = ath_tx_get_tx_tid(an, tid);
    /* Need the lock - fiddling with BAW */
    while (atid->baw_head != atid->baw_tail) {
        if (atid->tx_buf[atid->baw_head]) {
            atid->incomp++;
            atid->cleanup_inprogress = 1;
            atid->tx_buf[atid->baw_head] = NULL;
        }
        INCR(atid->baw_head, ATH_TID_MAX_BUFS);
        INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
    }

    /*
     * If cleanup is required, defer TID scheduling
     * until all the HW queued packets have been
     * sent.
     */
    if (! atid->cleanup_inprogress)
        ath_tx_tid_resume(sc, atid);

    if (atid->cleanup_inprogress)
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: TID %d: cleanup needed: %d packets\n",
            __func__, tid, atid->incomp);
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    /* Handle completing frames and fail them */
    while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
        TAILQ_REMOVE(&bf_cq, bf, bf_list);
        ath_tx_default_comp(sc, bf, 1);
    }
}
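
/*
 * The BAW walk in ath_tx_tid_cleanup() above advances two ring
 * indices in lock-step. A sketch of the equivalent open-coded
 * advance, assuming INCR() is the usual wrap-to-zero increment
 * over a fixed-size ring (an assumption; see its definition in
 * the driver headers):
 */
#if 0
    atid->baw_head = (atid->baw_head + 1) % ATH_TID_MAX_BUFS;
    tap->txa_start = (tap->txa_start + 1) % IEEE80211_SEQ_RANGE;
#endif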

static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
    struct ath_buf *nbf;
    int error;

    nbf = ath_buf_clone(sc, bf);

#if 0
    device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n",
        __func__);
#endif

    if (nbf == NULL) {
        /* Failed to clone */
        device_printf(sc->sc_dev,
            "%s: failed to clone a busy buffer\n",
            __func__);
        return NULL;
    }

    /* Setup the dma for the new buffer */
    error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: failed to setup dma for clone\n",
            __func__);
        /*
         * Put this at the head of the list, not tail;
         * that way it doesn't interfere with the
         * busy buffer logic (which uses the tail of
         * the list.)
         */
        ATH_TXBUF_LOCK(sc);
        ath_returnbuf_head(sc, nbf);
        ATH_TXBUF_UNLOCK(sc);
        return NULL;
    }

    /* Update BAW if required, before we free the original buf */
    if (bf->bf_state.bfs_dobaw)
        ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);

    /* Free the original buffer; return the new (cloned) buffer */
    bf->bf_m = NULL;
    bf->bf_node = NULL;
    ath_freebuf(sc, bf);

    return nbf;
}
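
/*
 * Example of the "busy buffer" retry pattern used by the two
 * retry paths below (sketch only, not compiled in): if the
 * buffer is still held busy by the hardware it can't be
 * re-queued directly, so it's cloned first; failing that, the
 * retry count is forced past SWMAX_RETRIES so the frame is
 * dropped rather than reused.
 */
#if 0
    if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
        (bf->bf_flags & ATH_BUF_BUSY)) {
        struct ath_buf *nbf;

        nbf = ath_tx_retry_clone(sc, an, atid, bf);
        if (nbf != NULL)
            bf = nbf;    /* the original bf has been freed */
        else
            bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
    }
#endif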

/*
 * Handle retrying an unaggregate frame in an aggregate
 * session.
 *
 * If too many retries occur, pause the TID, wait for
 * any further retransmits (as there's no reason why
 * non-aggregate frames in an aggregate session are
 * transmitted in-order; they just have to be in-BAW)
 * and then queue a BAR.
 */
static void
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
    struct ieee80211_node *ni = bf->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    int tid = bf->bf_state.bfs_tid;
    struct ath_tid *atid = &an->an_tid[tid];
    struct ieee80211_tx_ampdu *tap;

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

    tap = ath_tx_get_tx_tid(an, tid);

    /*
     * If the buffer is marked as busy, we can't directly
     * reuse it. Instead, try to clone the buffer.
     * If the clone is successful, recycle the old buffer.
     * If the clone is unsuccessful, set bfs_retries to max
     * to force the next bit of code to free the buffer
     * for us.
     */
    if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
        (bf->bf_flags & ATH_BUF_BUSY)) {
        struct ath_buf *nbf;
        nbf = ath_tx_retry_clone(sc, an, atid, bf);
        if (nbf)
            /* bf has been freed at this point */
            bf = nbf;
        else
            bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
    }

    if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
        DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
            "%s: exceeded retries; seqno %d\n",
            __func__, SEQNO(bf->bf_state.bfs_seqno));
        sc->sc_stats.ast_tx_swretrymax++;

        /* Update BAW anyway */
        if (bf->bf_state.bfs_dobaw) {
            ath_tx_update_baw(sc, an, atid, bf);
            if (! bf->bf_state.bfs_addedbaw)
                device_printf(sc->sc_dev,
                    "%s: wasn't added: seqno %d\n",
                    __func__, SEQNO(bf->bf_state.bfs_seqno));
        }
        bf->bf_state.bfs_dobaw = 0;

        /* Suspend the TX queue and get ready to send the BAR */
        ath_tx_tid_bar_suspend(sc, atid);

        /* Send the BAR if there are no other frames waiting */
        if (ath_tx_tid_bar_tx_ready(sc, atid))
            ath_tx_tid_bar_tx(sc, atid);

        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

        /* Free buffer, bf is free after this call */
        ath_tx_default_comp(sc, bf, 0);
        return;
    }

    /*
     * This increments the retry counter as well as
     * sets the retry flag in the ath_buf and packet
     * body.
     */
    ath_tx_set_retry(sc, bf);
    sc->sc_stats.ast_tx_swretries++;

    /*
     * Insert this at the head of the queue, so it's
     * retried before any current/subsequent frames.
     */
    ATH_TID_INSERT_HEAD(atid, bf, bf_list);
    ath_tx_tid_sched(sc, atid);
    /* Send the BAR if there are no other frames waiting */
    if (ath_tx_tid_bar_tx_ready(sc, atid))
        ath_tx_tid_bar_tx(sc, atid);

    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
}

/*
 * Common code for aggregate excessive retry/subframe retry.
 * If retrying, queues buffers to bf_q. If not, frees the
 * buffers.
 *
 * XXX should unify this with ath_tx_aggr_retry_unaggr()
 */
static int
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
    ath_bufhead *bf_q)
{
    struct ieee80211_node *ni = bf->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    int tid = bf->bf_state.bfs_tid;
    struct ath_tid *atid = &an->an_tid[tid];

    ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]);

    /* XXX clr11naggr should be done for all subframes */
    ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
    ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);

    /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */

    /*
     * If the buffer is marked as busy, we can't directly
     * reuse it. Instead, try to clone the buffer.
     * If the clone is successful, recycle the old buffer.
     * If the clone is unsuccessful, set bfs_retries to max
     * to force the next bit of code to free the buffer
     * for us.
     */
    if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
        (bf->bf_flags & ATH_BUF_BUSY)) {
        struct ath_buf *nbf;
        nbf = ath_tx_retry_clone(sc, an, atid, bf);
        if (nbf)
            /* bf has been freed at this point */
            bf = nbf;
        else
            bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
    }

    if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
        sc->sc_stats.ast_tx_swretrymax++;
        DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
            "%s: max retries: seqno %d\n",
            __func__, SEQNO(bf->bf_state.bfs_seqno));
        ath_tx_update_baw(sc, an, atid, bf);
        if (! bf->bf_state.bfs_addedbaw)
            device_printf(sc->sc_dev,
                "%s: wasn't added: seqno %d\n",
                __func__, SEQNO(bf->bf_state.bfs_seqno));
        bf->bf_state.bfs_dobaw = 0;
        return 1;
    }

    ath_tx_set_retry(sc, bf);
    sc->sc_stats.ast_tx_swretries++;
    bf->bf_next = NULL;    /* Just to make sure */

    /* Clear the aggregate state */
    bf->bf_state.bfs_aggr = 0;
    bf->bf_state.bfs_ndelim = 0;    /* ??? needed? */
    bf->bf_state.bfs_nframes = 1;

    TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
    return 0;
}
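
/*
 * Example of how ath_tx_retry_subframe()'s return value is
 * interpreted by its callers (sketch only, not compiled in;
 * modelled on ath_tx_comp_aggr_error() below): a return of 1
 * means the subframe exhausted its retries and must be completed
 * as a failure; a return of 0 means it has been queued to bf_q
 * for software retransmission.
 */
#if 0
    if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
        drops++;
        TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);    /* complete later */
    }
#endif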

/*
 * Error packet completion for an aggregate destination.
 */
static void
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
    struct ath_tid *tid)
{
    struct ieee80211_node *ni = bf_first->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_buf *bf_next, *bf;
    ath_bufhead bf_q;
    int drops = 0;
    struct ieee80211_tx_ampdu *tap;
    ath_bufhead bf_cq;

    TAILQ_INIT(&bf_q);
    TAILQ_INIT(&bf_cq);

    /*
     * Update rate control - all frames have failed.
     *
     * XXX use the length in the first frame in the series;
     * XXX just so things are consistent for now.
     */
    ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
        &bf_first->bf_status.ds_txstat,
        bf_first->bf_state.bfs_pktlen,
        bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);

    ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]);
    tap = ath_tx_get_tx_tid(an, tid->tid);
    sc->sc_stats.ast_tx_aggr_failall++;

    /* Retry all subframes */
    bf = bf_first;
    while (bf) {
        bf_next = bf->bf_next;
        bf->bf_next = NULL;    /* Remove it from the aggr list */
        sc->sc_stats.ast_tx_aggr_fail++;
        if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
            drops++;
            bf->bf_next = NULL;
            TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
        }
        bf = bf_next;
    }

    /* Prepend all frames to the beginning of the queue */
    while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
        TAILQ_REMOVE(&bf_q, bf, bf_list);
        ATH_TID_INSERT_HEAD(tid, bf, bf_list);
    }

    /*
     * Schedule the TID to be re-tried.
     */
    ath_tx_tid_sched(sc, tid);

    /*
     * Send a BAR if we dropped any frames.
     *
     * Keep the txq lock held for now, as we need to ensure
     * that ni_txseqs[] is consistent (as it's being updated
     * in the ifnet TX context or raw TX context.)
     */
    if (drops) {
        /* Suspend the TX queue and get ready to send the BAR */
        ath_tx_tid_bar_suspend(sc, tid);
    }

    /*
     * Send BAR if required
     */
    if (ath_tx_tid_bar_tx_ready(sc, tid))
        ath_tx_tid_bar_tx(sc, tid);

    ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]);

    /* Complete frames which errored out */
    while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
        TAILQ_REMOVE(&bf_cq, bf, bf_list);
        ath_tx_default_comp(sc, bf, 0);
    }
}

/*
 * Handle clean-up of packets from an aggregate list.
 *
 * There's no need to update the BAW here - the session is being
 * torn down.
 */
static void
ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
{
    struct ath_buf *bf, *bf_next;
    struct ieee80211_node *ni = bf_first->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    int tid = bf_first->bf_state.bfs_tid;
    struct ath_tid *atid = &an->an_tid[tid];

    bf = bf_first;

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

    /* update incomp */
    while (bf) {
        atid->incomp--;
        bf = bf->bf_next;
    }

    if (atid->incomp == 0) {
        DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
            "%s: TID %d: cleaned up! resume!\n",
            __func__, tid);
        atid->cleanup_inprogress = 0;
        ath_tx_tid_resume(sc, atid);
    }

    /* Send BAR if required */
    /* XXX why would we send a BAR when transitioning to non-aggregation? */
    if (ath_tx_tid_bar_tx_ready(sc, atid))
        ath_tx_tid_bar_tx(sc, atid);

    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    /*
     * Handle frame completion. The incomp walk above left bf at
     * NULL, so restart from the head of the aggregate list or the
     * loop below never runs and the frames leak.
     */
    bf = bf_first;
    while (bf) {
        bf_next = bf->bf_next;
        ath_tx_default_comp(sc, bf, 1);
        bf = bf_next;
    }
}
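
/*
 * The cleanup state machine in brief (illustration only, not
 * compiled in): ath_tx_tid_cleanup() counts the still-outstanding
 * hardware frames into atid->incomp and sets cleanup_inprogress;
 * the two completion paths above decrement it, and whichever one
 * reaches zero clears the flag and resumes the TID.
 */
#if 0
    /* setup (ath_tx_tid_cleanup): */
    atid->incomp++;
    atid->cleanup_inprogress = 1;

    /* teardown (ath_tx_comp_cleanup_unaggr / _aggr): */
    if (--atid->incomp == 0) {
        atid->cleanup_inprogress = 0;
        ath_tx_tid_resume(sc, atid);
    }
#endif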
4177 * 4178 * XXX this is duplicate work, ew. 4179 */ 4180 if (atid->isfiltered) 4181 ath_tx_tid_filt_comp_complete(sc, atid); 4182 4183 /* 4184 * Punt cleanup to the relevant function, not our problem now 4185 */ 4186 if (atid->cleanup_inprogress) { 4187 if (atid->isfiltered) 4188 device_printf(sc->sc_dev, 4189 "%s: isfiltered=1, normal_comp?\n", 4190 __func__); 4191 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4192 ath_tx_comp_cleanup_aggr(sc, bf_first); 4193 return; 4194 } 4195 4196 /* 4197 * If the frame is filtered, transition to filtered frame 4198 * mode and add this to the filtered frame list. 4199 * 4200 * XXX TODO: figure out how this interoperates with 4201 * BAR, pause and cleanup states. 4202 */ 4203 if ((ts.ts_status & HAL_TXERR_FILT) || 4204 (ts.ts_status != 0 && atid->isfiltered)) { 4205 if (fail != 0) 4206 device_printf(sc->sc_dev, 4207 "%s: isfiltered=1, fail=%d\n", __func__, fail); 4208 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); 4209 4210 /* Remove from BAW */ 4211 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { 4212 if (bf->bf_state.bfs_addedbaw) 4213 drops++; 4214 if (bf->bf_state.bfs_dobaw) { 4215 ath_tx_update_baw(sc, an, atid, bf); 4216 if (! bf->bf_state.bfs_addedbaw) 4217 device_printf(sc->sc_dev, 4218 "%s: wasn't added: seqno %d\n", 4219 __func__, 4220 SEQNO(bf->bf_state.bfs_seqno)); 4221 } 4222 bf->bf_state.bfs_dobaw = 0; 4223 } 4224 /* 4225 * If any intermediate frames in the BAW were dropped when 4226 * handling filtering things, send a BAR. 4227 */ 4228 if (drops) 4229 ath_tx_tid_bar_suspend(sc, atid); 4230 4231 /* 4232 * Finish up by sending a BAR if required and freeing 4233 * the frames outside of the TX lock. 4234 */ 4235 goto finish_send_bar; 4236 } 4237 4238 /* 4239 * XXX for now, use the first frame in the aggregate for 4240 * XXX rate control completion; it's at least consistent. 4241 */ 4242 pktlen = bf_first->bf_state.bfs_pktlen; 4243 4244 /* 4245 * Handle errors first! 4246 * 4247 * Here, handle _any_ error as a "exceeded retries" error. 4248 * Later on (when filtered frames are to be specially handled) 4249 * it'll have to be expanded. 4250 */ 4251 #if 0 4252 if (ts.ts_status & HAL_TXERR_XRETRY) { 4253 #endif 4254 if (ts.ts_status != 0) { 4255 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4256 ath_tx_comp_aggr_error(sc, bf_first, atid); 4257 return; 4258 } 4259 4260 tap = ath_tx_get_tx_tid(an, tid); 4261 4262 /* 4263 * extract starting sequence and block-ack bitmap 4264 */ 4265 /* XXX endian-ness of seq_st, ba? */ 4266 seq_st = ts.ts_seqnum; 4267 hasba = !! (ts.ts_flags & HAL_TX_BA); 4268 tx_ok = (ts.ts_status == 0); 4269 isaggr = bf_first->bf_state.bfs_aggr; 4270 ba[0] = ts.ts_ba_low; 4271 ba[1] = ts.ts_ba_high; 4272 4273 /* 4274 * Copy the TX completion status and the rate control 4275 * series from the first descriptor, as it may be freed 4276 * before the rate control code can get its grubby fingers 4277 * into things. 4278 */ 4279 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 4280 4281 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4282 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 4283 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 4284 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 4285 isaggr, seq_st, hasba, ba[0], ba[1]); 4286 4287 /* Occasionally, the MAC sends a tx status for the wrong TID. 

    /*
     * XXX for now, use the first frame in the aggregate for
     * XXX rate control completion; it's at least consistent.
     */
    pktlen = bf_first->bf_state.bfs_pktlen;

    /*
     * Handle errors first!
     *
     * Here, handle _any_ error as an "exceeded retries" error.
     * Later on (when filtered frames are to be specially handled)
     * it'll have to be expanded.
     */
#if 0
    if (ts.ts_status & HAL_TXERR_XRETRY) {
#endif
    if (ts.ts_status != 0) {
        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
        ath_tx_comp_aggr_error(sc, bf_first, atid);
        return;
    }

    tap = ath_tx_get_tx_tid(an, tid);

    /*
     * Extract the starting sequence number and block-ack bitmap.
     */
    /* XXX endian-ness of seq_st, ba? */
    seq_st = ts.ts_seqnum;
    hasba = !! (ts.ts_flags & HAL_TX_BA);
    tx_ok = (ts.ts_status == 0);
    isaggr = bf_first->bf_state.bfs_aggr;
    ba[0] = ts.ts_ba_low;
    ba[1] = ts.ts_ba_high;

    /*
     * Copy the TX completion status and the rate control
     * series from the first descriptor, as it may be freed
     * before the rate control code can get its grubby fingers
     * into things.
     */
    memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));

    DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
        "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
        "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
        __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
        isaggr, seq_st, hasba, ba[0], ba[1]);

    /* Occasionally, the MAC sends a tx status for the wrong TID. */
    if (tid != ts.ts_tid) {
        device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n",
            __func__, tid, ts.ts_tid);
        tx_ok = 0;
    }

    /* AR5416 BA bug; this requires an interface reset */
    if (isaggr && tx_ok && (! hasba)) {
        device_printf(sc->sc_dev,
            "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
            "seq_st=%d\n",
            __func__, hasba, tx_ok, isaggr, seq_st);
        /* XXX TODO: schedule an interface reset */
#ifdef ATH_DEBUG
        ath_printtxbuf(sc, bf_first,
            sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
#endif
    }

    /*
     * Walk the list of frames, figure out which ones were correctly
     * sent and which weren't.
     */
    bf = bf_first;
    nf = bf_first->bf_state.bfs_nframes;

    /* bf_first is going to be invalid once this list is walked */
    bf_first = NULL;

    /*
     * Walk the list of completed frames and determine
     * which need to be completed and which need to be
     * retransmitted.
     *
     * For completed frames, the completion functions need
     * to be called at the end of this function as the last
     * node reference may free the node.
     *
     * Finally, since the TXQ lock can't be held during the
     * completion callback (to avoid lock recursion),
     * the completion calls have to be done outside of the
     * lock.
     */
    while (bf) {
        nframes++;
        ba_index = ATH_BA_INDEX(seq_st,
            SEQNO(bf->bf_state.bfs_seqno));
        bf_next = bf->bf_next;
        bf->bf_next = NULL;    /* Remove it from the aggr list */

        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
            "%s: checking bf=%p seqno=%d; ack=%d\n",
            __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
            ATH_BA_ISSET(ba, ba_index));

        if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
            sc->sc_stats.ast_tx_aggr_ok++;
            ath_tx_update_baw(sc, an, atid, bf);
            bf->bf_state.bfs_dobaw = 0;
            if (! bf->bf_state.bfs_addedbaw)
                device_printf(sc->sc_dev,
                    "%s: wasn't added: seqno %d\n",
                    __func__, SEQNO(bf->bf_state.bfs_seqno));
            bf->bf_next = NULL;
            TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
        } else {
            sc->sc_stats.ast_tx_aggr_fail++;
            if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
                drops++;
                bf->bf_next = NULL;
                TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
            }
            nbad++;
        }
        bf = bf_next;
    }

    /*
     * Now that the BAW updates have been done, unlock.
     *
     * txseq is grabbed before the lock is released so we
     * have a consistent view of what -was- in the BAW.
     * Anything after this point will not yet have been
     * TXed.
     */
    txseq = tap->txa_start;
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    if (nframes != nf)
        device_printf(sc->sc_dev,
            "%s: num frames seen=%d; bf nframes=%d\n",
            __func__, nframes, nf);

    /*
     * Now we know how many frames were bad, call the rate
     * control code.
     */
    if (fail == 0)
        ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
            nbad);

    /*
     * Send a BAR if we dropped any frames.
     */
    if (drops) {
        /* Suspend the TX queue and get ready to send the BAR */
        ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
        ath_tx_tid_bar_suspend(sc, atid);
        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
    }

    DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
        "%s: txa_start now %d\n", __func__, tap->txa_start);

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

    /* Prepend all frames to the beginning of the queue */
    while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
        TAILQ_REMOVE(&bf_q, bf, bf_list);
        ATH_TID_INSERT_HEAD(atid, bf, bf_list);
    }

    /*
     * Reschedule to grab some further frames.
     */
    ath_tx_tid_sched(sc, atid);

    /*
     * If the queue is filtered, re-schedule as required.
     *
     * This is required as there may be a subsequent TX descriptor
     * for this end-node that has CLRDMASK set, so it's quite possible
     * that a filtered frame will be followed by a non-filtered
     * (complete or otherwise) frame.
     *
     * XXX should we do this before we complete the frame?
     */
    if (atid->isfiltered)
        ath_tx_tid_filt_comp_complete(sc, atid);

finish_send_bar:

    /*
     * Send BAR if required
     */
    if (ath_tx_tid_bar_tx_ready(sc, atid))
        ath_tx_tid_bar_tx(sc, atid);

    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    /* Do deferred completion */
    while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
        TAILQ_REMOVE(&bf_cq, bf, bf_list);
        ath_tx_default_comp(sc, bf, 0);
    }
}
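
/*
 * Illustration of the block-ack bitmap test used above (sketch
 * only, not compiled in): the BA holds one bit per sequence
 * number starting at seq_st, so a subframe is acknowledged iff
 * the bit indexed by the offset of its sequence number from
 * seq_st is set.
 */
#if 0
    ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno));
    if (ATH_BA_ISSET(ba, ba_index)) {
        /* ACKed: remove from the BAW and complete */
    } else {
        /* unACKed: software retry via ath_tx_retry_subframe() */
    }
#endif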

/*
 * Handle completion of unaggregated frames in an ADDBA
 * session.
 *
 * Fail is set to 1 if the entry is being freed via a call to
 * ath_tx_draintxq().
 */
static void
ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
    struct ieee80211_node *ni = bf->bf_node;
    struct ath_node *an = ATH_NODE(ni);
    int tid = bf->bf_state.bfs_tid;
    struct ath_tid *atid = &an->an_tid[tid];
    struct ath_tx_status ts;
    int drops = 0;

    /*
     * Take a copy of this; filtering/cloning the frame may free the
     * bf pointer.
     */
    ts = bf->bf_status.ds_txstat;

    /*
     * Update rate control status here, before we possibly
     * punt to retry or cleanup.
     *
     * Do it outside of the TXQ lock.
     */
    if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
        ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
            &bf->bf_status.ds_txstat,
            bf->bf_state.bfs_pktlen,
            1, (ts.ts_status == 0) ? 0 : 1);

    /*
     * This is called early so atid->hwq_depth can be tracked.
     * This unfortunately means that it's released and regrabbed
     * during retry and cleanup. That's rather inefficient.
     */
    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);

    if (tid == IEEE80211_NONQOS_TID)
        device_printf(sc->sc_dev, "%s: TID=16!\n", __func__);

    DPRINTF(sc, ATH_DEBUG_SW_TX,
        "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
        __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
        SEQNO(bf->bf_state.bfs_seqno));

    atid->hwq_depth--;
    if (atid->hwq_depth < 0)
        device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n",
            __func__, atid->hwq_depth);

    /*
     * If the TID is filtered, handle completing the filter
     * transition before potentially kicking it to the cleanup
     * function.
     */
    if (atid->isfiltered)
        ath_tx_tid_filt_comp_complete(sc, atid);

    /*
     * If a cleanup is in progress, punt to comp_cleanup
     * rather than handling it here. It's then that path's
     * responsibility to clean up, call the completion
     * function in net80211, etc.
     */
    if (atid->cleanup_inprogress) {
        if (atid->isfiltered)
            device_printf(sc->sc_dev,
                "%s: isfiltered=1, normal_comp?\n",
                __func__);
        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
            __func__);
        ath_tx_comp_cleanup_unaggr(sc, bf);
        return;
    }

    /*
     * XXX TODO: how does cleanup, BAR and filtered frame handling
     * overlap?
     *
     * If the frame is filtered OR if it's any failure but
     * the TID is filtered, the frame must be added to the
     * filtered frame list.
     *
     * However - a busy buffer can't be added to the filtered
     * list as it will end up being recycled without having
     * been made available for the hardware.
     */
    if ((ts.ts_status & HAL_TXERR_FILT) ||
        (ts.ts_status != 0 && atid->isfiltered)) {
        int freeframe;

        if (fail != 0)
            device_printf(sc->sc_dev,
                "%s: isfiltered=1, fail=%d\n",
                __func__,
                fail);
        freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
        if (freeframe) {
            /* Remove from BAW */
            if (bf->bf_state.bfs_addedbaw)
                drops++;
            if (bf->bf_state.bfs_dobaw) {
                ath_tx_update_baw(sc, an, atid, bf);
                if (! bf->bf_state.bfs_addedbaw)
                    device_printf(sc->sc_dev,
                        "%s: wasn't added: seqno %d\n",
                        __func__,
                        SEQNO(bf->bf_state.bfs_seqno));
            }
            bf->bf_state.bfs_dobaw = 0;
        }

        /*
         * If the frame couldn't be filtered, treat it as a drop and
         * prepare to send a BAR.
         */
        if (freeframe && drops)
            ath_tx_tid_bar_suspend(sc, atid);

        /*
         * Send BAR if required
         */
        if (ath_tx_tid_bar_tx_ready(sc, atid))
            ath_tx_tid_bar_tx(sc, atid);

        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

        /*
         * If freeframe is set, then the frame couldn't be
         * cloned and bf is still valid. Just complete/free it.
         */
        if (freeframe)
            ath_tx_default_comp(sc, bf, fail);

        return;
    }

    /*
     * Don't bother with the retry check if all frames
     * are being failed (eg during queue deletion.)
     */
#if 0
    if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
#endif
    if (fail == 0 && ts.ts_status != 0) {
        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
            __func__);
        ath_tx_aggr_retry_unaggr(sc, bf);
        return;
    }

    /* Success? Complete */
    DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
        __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
    if (bf->bf_state.bfs_dobaw) {
        ath_tx_update_baw(sc, an, atid, bf);
        bf->bf_state.bfs_dobaw = 0;
        if (! bf->bf_state.bfs_addedbaw)
            device_printf(sc->sc_dev,
                "%s: wasn't added: seqno %d\n",
                __func__, SEQNO(bf->bf_state.bfs_seqno));
    }

    /*
     * If the queue is filtered, re-schedule as required.
     *
     * This is required as there may be a subsequent TX descriptor
     * for this end-node that has CLRDMASK set, so it's quite possible
     * that a filtered frame will be followed by a non-filtered
     * (complete or otherwise) frame.
     *
     * XXX should we do this before we complete the frame?
     */
    if (atid->isfiltered)
        ath_tx_tid_filt_comp_complete(sc, atid);

    /*
     * Send BAR if required
     */
    if (ath_tx_tid_bar_tx_ready(sc, atid))
        ath_tx_tid_bar_tx(sc, atid);

    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    ath_tx_default_comp(sc, bf, fail);
    /* bf is freed at this point */
}

void
ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
    if (bf->bf_state.bfs_aggr)
        ath_tx_aggr_comp_aggr(sc, bf, fail);
    else
        ath_tx_aggr_comp_unaggr(sc, bf, fail);
}

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * This is the aggregate version.
 */
void
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
    struct ath_buf *bf;
    struct ath_txq *txq = sc->sc_ac2q[tid->ac];
    struct ieee80211_tx_ampdu *tap;
    ATH_AGGR_STATUS status;
    ath_bufhead bf_q;

    DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
    ATH_TXQ_LOCK_ASSERT(txq);

    tap = ath_tx_get_tx_tid(an, tid->tid);

    if (tid->tid == IEEE80211_NONQOS_TID)
        device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n",
            __func__);

    for (;;) {
        status = ATH_AGGR_DONE;

        /*
         * If the upper layer has paused the TID, don't
         * queue any further packets.
         *
         * This can also occur from the completion task because
         * of packet loss; but as it's serialised with this code,
         * it won't "appear" half way through queuing packets.
         */
        if (tid->paused)
            break;

        bf = ATH_TID_FIRST(tid);
        if (bf == NULL) {
            break;
        }

        /*
         * If the packet doesn't fall within the BAW (eg a NULL
         * data frame), schedule it directly; continue.
         */
        if (! bf->bf_state.bfs_dobaw) {
            DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                "%s: non-baw packet\n",
                __func__);
            ATH_TID_REMOVE(tid, bf, bf_list);

            if (bf->bf_state.bfs_nframes > 1)
                device_printf(sc->sc_dev,
                    "%s: aggr=%d, nframes=%d\n",
                    __func__,
                    bf->bf_state.bfs_aggr,
                    bf->bf_state.bfs_nframes);

            /*
             * This shouldn't happen - such frames shouldn't
             * ever have been queued as an aggregate in the
             * first place. However, make sure the fields
             * are correctly setup just to be totally sure.
             */
            bf->bf_state.bfs_aggr = 0;
            bf->bf_state.bfs_nframes = 1;

            /* Update CLRDMASK just before this frame is queued */
            ath_tx_update_clrdmask(sc, tid, bf);

            ath_tx_do_ratelookup(sc, bf);
            ath_tx_calc_duration(sc, bf);
            ath_tx_calc_protection(sc, bf);
            ath_tx_set_rtscts(sc, bf);
            ath_tx_rate_fill_rcflags(sc, bf);
            ath_tx_setds(sc, bf);
            ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);

            sc->sc_aggr_stats.aggr_nonbaw_pkt++;

            /* Queue the packet; continue */
            goto queuepkt;
        }

        TAILQ_INIT(&bf_q);

        /*
         * Do a rate control lookup on the first frame in the
         * list. The rate control code needs that to occur
         * before it can determine whether to TX.
         * It's inaccurate because the rate control code doesn't
         * really "do" aggregate lookups, so it only considers
         * the size of the first frame.
         */
        ath_tx_do_ratelookup(sc, bf);
        bf->bf_state.bfs_rc[3].rix = 0;
        bf->bf_state.bfs_rc[3].tries = 0;

        ath_tx_calc_duration(sc, bf);
        ath_tx_calc_protection(sc, bf);

        ath_tx_set_rtscts(sc, bf);
        ath_tx_rate_fill_rcflags(sc, bf);

        status = ath_tx_form_aggr(sc, an, tid, &bf_q);

        DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
            "%s: ath_tx_form_aggr() status=%d\n", __func__, status);

        /*
         * No frames to be picked up - out of BAW
         */
        if (TAILQ_EMPTY(&bf_q))
            break;

        /*
         * This assumes that the descriptor list in the ath_bufhead
         * is already linked together via bf_next pointers.
         */
        bf = TAILQ_FIRST(&bf_q);

        if (status == ATH_AGGR_8K_LIMITED)
            sc->sc_aggr_stats.aggr_rts_aggr_limited++;

        /*
         * If it's the only frame, send it as a non-aggregate;
         * assume that ath_tx_form_aggr() has checked
         * whether it's in the BAW and added it appropriately.
         */
        if (bf->bf_state.bfs_nframes == 1) {
            DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                "%s: single-frame aggregate\n", __func__);

            /* Update CLRDMASK just before this frame is queued */
            ath_tx_update_clrdmask(sc, tid, bf);

            bf->bf_state.bfs_aggr = 0;
            bf->bf_state.bfs_ndelim = 0;
            ath_tx_setds(sc, bf);
            ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
            if (status == ATH_AGGR_BAW_CLOSED)
                sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
            else
                sc->sc_aggr_stats.aggr_single_pkt++;
        } else {
            DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
                "%s: multi-frame aggregate: %d frames, "
                "length %d\n",
                __func__, bf->bf_state.bfs_nframes,
                bf->bf_state.bfs_al);
            bf->bf_state.bfs_aggr = 1;
            sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
            sc->sc_aggr_stats.aggr_aggr_pkt++;

            /* Update CLRDMASK just before this frame is queued */
            ath_tx_update_clrdmask(sc, tid, bf);

            /*
             * Calculate the duration/protection as required.
             */
            ath_tx_calc_duration(sc, bf);
            ath_tx_calc_protection(sc, bf);

            /*
             * Update the rate and rtscts information based on the
             * rate decision made by the rate control code;
             * the first frame in the aggregate needs it.
             */
            ath_tx_set_rtscts(sc, bf);

            /*
             * Setup the relevant descriptor fields
             * for aggregation. The first descriptor
             * already points to the rest in the chain.
             */
            ath_tx_setds_11n(sc, bf);
        }
 queuepkt:
        //txq = bf->bf_state.bfs_txq;

        /* Set completion handler, multi-frame aggregate or not */
        bf->bf_comp = ath_tx_aggr_comp;

        if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
            device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);

        /* Punt to txq */
        ath_tx_handoff(sc, txq, bf);

        /* Track outstanding buffer count to hardware */
        /* aggregates are "one" buffer */
        tid->hwq_depth++;

        /*
         * Break out if ath_tx_form_aggr() indicated
         * there can't be any further progress (eg BAW is full.)
         * Checking for an empty txq is done above.
         *
         * XXX locking on txq here?
         */
        if (txq->axq_aggr_depth >= sc->sc_hwq_limit ||
            status == ATH_AGGR_BAW_CLOSED)
            break;
    }
}

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * This is the non-aggregate version.
 */
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
    struct ath_buf *bf;
    struct ath_txq *txq = sc->sc_ac2q[tid->ac];

    DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
        __func__, an, tid->tid);

    ATH_TID_LOCK_ASSERT(sc, tid);

    /* Check - is AMPDU pending or running? then print out something */
    if (ath_tx_ampdu_pending(sc, an, tid->tid))
        device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n",
            __func__, tid->tid);
    if (ath_tx_ampdu_running(sc, an, tid->tid))
        device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n",
            __func__, tid->tid);

    for (;;) {

        /*
         * If the upper layers have paused the TID, don't
         * queue any further packets.
         */
        if (tid->paused)
            break;

        bf = ATH_TID_FIRST(tid);
        if (bf == NULL) {
            break;
        }

        ATH_TID_REMOVE(tid, bf, bf_list);

        KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n"));

        /* Sanity check! */
        if (tid->tid != bf->bf_state.bfs_tid) {
            device_printf(sc->sc_dev, "%s: bfs_tid %d !="
                " tid %d\n",
                __func__, bf->bf_state.bfs_tid, tid->tid);
        }
        /* Normal completion handler */
        bf->bf_comp = ath_tx_normal_comp;

        /*
         * Override this for now, until the non-aggregate
         * completion handler correctly handles software retransmits.
         */
        bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

        /* Update CLRDMASK just before this frame is queued */
        ath_tx_update_clrdmask(sc, tid, bf);

        /* Program descriptors + rate control */
        ath_tx_do_ratelookup(sc, bf);
        ath_tx_calc_duration(sc, bf);
        ath_tx_calc_protection(sc, bf);
        ath_tx_set_rtscts(sc, bf);
        ath_tx_rate_fill_rcflags(sc, bf);
        ath_tx_setds(sc, bf);

        /* Track outstanding buffer count to hardware */
        /* aggregates are "one" buffer */
        tid->hwq_depth++;

        /* Punt to hardware or software txq */
        ath_tx_handoff(sc, txq, bf);
    }
}
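
/*
 * Both queue paths above program a frame with the same sequence
 * of helpers before handoff; shown here in one place for
 * reference (sketch only, not compiled in):
 */
#if 0
    ath_tx_do_ratelookup(sc, bf);       /* pick tx rate(s) */
    ath_tx_calc_duration(sc, bf);       /* NAV/duration fields */
    ath_tx_calc_protection(sc, bf);     /* RTS/CTS protection policy */
    ath_tx_set_rtscts(sc, bf);          /* apply RTS/CTS flags */
    ath_tx_rate_fill_rcflags(sc, bf);   /* rate control series flags */
    ath_tx_setds(sc, bf);               /* write the descriptor(s) */
#endif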

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_tid *tid, *next, *last;

    ATH_TXQ_LOCK_ASSERT(txq);

    /*
     * Don't schedule if the hardware queue is busy.
     * This (hopefully) gives some more time to aggregate
     * some packets in the aggregation queue.
     */
    if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
        sc->sc_aggr_stats.aggr_sched_nopkt++;
        return;
    }

    last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

    TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
        /*
         * Suspend paused queues here; they'll be resumed
         * once the addba completes or times out.
         */
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
            __func__, tid->tid, tid->paused);
        ath_tx_tid_unsched(sc, tid);
        if (tid->paused) {
            continue;
        }
        if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
            ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
        else
            ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

        /* Not empty? Re-schedule */
        if (tid->axq_depth != 0)
            ath_tx_tid_sched(sc, tid);

        /* Give the software queue time to aggregate more packets */
        if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
            break;
        }

        /*
         * If this was the last entry on the original list, stop.
         * Otherwise nodes that have been rescheduled onto the end
         * of the TID FIFO list will just keep being rescheduled.
         */
        if (tid == last)
            break;
    }
}

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
    struct ieee80211_node *ni = &an->an_node;
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return NULL;

    tap = &ni->ni_tx_ampdu[tid];
    return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return 0;

    tap = ath_tx_get_tx_tid(an, tid);
    if (tap == NULL)
        return 0;    /* Not valid; default to not running */

    return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return 0;

    tap = ath_tx_get_tx_tid(an, tid);
    if (tap == NULL)
        return 0;    /* Not valid; default to not pending */

    return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
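
/*
 * Example of how the two predicates above combine at queue time
 * (sketch only, not compiled in; the real decision lives in the
 * TX entry points elsewhere in this file): frames are held back
 * while negotiation is pending, and only dispatched through the
 * aggregate path once the session is running.
 */
#if 0
    if (ath_tx_ampdu_running(sc, an, tid))
        ath_tx_tid_hw_queue_aggr(sc, an, atid);
    else if (! ath_tx_ampdu_pending(sc, an, tid))
        ath_tx_tid_hw_queue_norm(sc, an, atid);
    /* else: pending - leave the frames queued until addba resolves */
#endif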
5077 * 5078 * Although the taskqueue may be running and scheduling some more 5079 * packets, these should all be _before_ the addba sequence number. 5080 * However, net80211 will keep self-assigning sequence numbers 5081 * until addba has been negotiated. 5082 * 5083 * In the past, these packets would be "paused" (which still works 5084 * fine, as they're being scheduled to the driver in the same 5085 * serialised method which is calling the addba request routine) 5086 * and when the aggregation session begins, they'll be dequeued 5087 * as aggregate packets and added to the BAW. However, now there's 5088 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these 5089 * packets. Thus they never get included in the BAW tracking and 5090 * this can cause the initial burst of packets after the addba 5091 * negotiation to "hang", as they quickly fall outside the BAW. 5092 * 5093 * The "eventual" solution should be to tag these packets with 5094 * dobaw. Although net80211 has given us a sequence number, 5095 * it'll be "after" the left edge of the BAW and thus it'll 5096 * fall within it. 5097 */ 5098 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 5099 /* 5100 * This is a bit annoying. Until net80211 HT code inherits some 5101 * (any) locking, we may have this called in parallel BUT only 5102 * one response/timeout will be called. Grr. 5103 */ 5104 if (atid->addba_tx_pending == 0) { 5105 ath_tx_tid_pause(sc, atid); 5106 atid->addba_tx_pending = 1; 5107 } 5108 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 5109 5110 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5111 "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", 5112 __func__, dialogtoken, baparamset, batimeout); 5113 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5114 "%s: txa_start=%d, ni_txseqs=%d\n", 5115 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5116 5117 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5118 batimeout); 5119 } 5120 5121 /* 5122 * Handle an ADDBA response. 5123 * 5124 * We unpause the queue so TX'ing can resume. 5125 * 5126 * Any packets TX'ed from this point should be "aggregate" (whether 5127 * aggregate or not) so the BAW is updated. 5128 * 5129 * Note! net80211 keeps self-assigning sequence numbers until 5130 * ampdu is negotiated. This means the initially-negotiated BAW left 5131 * edge won't match the ni->ni_txseq. 5132 * 5133 * So, being very dirty, the BAW left edge is "slid" here to match 5134 * ni->ni_txseq. 5135 * 5136 * What likely SHOULD happen is that all packets subsequent to the 5137 * addba request should be tagged as aggregate and queued as non-aggregate 5138 * frames; thus updating the BAW. For now though, I'll just slide the 5139 * window. 5140 */ 5141 int 5142 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5143 int status, int code, int batimeout) 5144 { 5145 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5146 int tid = tap->txa_tid; 5147 struct ath_node *an = ATH_NODE(ni); 5148 struct ath_tid *atid = &an->an_tid[tid]; 5149 int r; 5150 5151 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5152 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__, 5153 status, code, batimeout); 5154 5155 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 5156 "%s: txa_start=%d, ni_txseqs=%d\n", 5157 __func__, tap->txa_start, ni->ni_txseqs[tid]); 5158 5159 /* 5160 * Call this first, so the interface flags get updated 5161 * before the TID is unpaused. Otherwise a race condition 5162 * exists where the unpaused TID still doesn't yet have 5163 * IEEE80211_AGGR_RUNNING set. 

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be "aggregate" (whether
 * aggregate or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated. This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW. For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
    struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];
    int r;

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: called; status=%d, code=%d, batimeout=%d\n", __func__,
        status, code, batimeout);

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: txa_start=%d, ni_txseqs=%d\n",
        __func__, tap->txa_start, ni->ni_txseqs[tid]);

    /*
     * Call this first, so the interface flags get updated
     * before the TID is unpaused. Otherwise a race condition
     * exists where the unpaused TID still doesn't yet have
     * IEEE80211_AGGR_RUNNING set.
     */
    r = sc->sc_addba_response(ni, tap, status, code, batimeout);

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
    atid->addba_tx_pending = 0;
    /*
     * XXX dirty!
     * Slide the BAW left edge to wherever net80211 left it for us.
     * Read above for more information.
     */
    tap->txa_start = ni->ni_txseqs[tid];
    ath_tx_tid_resume(sc, atid);
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
    return r;
}

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
    struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__);

    /*
     * Pause TID traffic early, so there aren't any races.
     * Unblock the pending BAR held traffic, if it's currently paused.
     */
    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
    ath_tx_tid_pause(sc, atid);
    if (atid->bar_wait) {
        /*
         * bar_unsuspend() expects bar_tx == 1, as it should be
         * called from the TX completion path. This quietens
         * the warning. It's cleared for us anyway.
         */
        atid->bar_tx = 1;
        ath_tx_tid_bar_unsuspend(sc, atid);
    }
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    /* There's no need to hold the TXQ lock here */
    sc->sc_addba_stop(ni, tap);

    /*
     * ath_tx_tid_cleanup will resume the TID if possible, otherwise
     * it'll set the cleanup flag, and it'll be unpaused once
     * things have been cleaned up.
     */
    ath_tx_tid_cleanup(sc, an, tid);
}

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session. Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
    struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];
    int attempts = tap->txa_attempts;

    DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
        "%s: called; tap=%p, atid=%p, txa_tid=%d, atid->tid=%d, "
        "status=%d, attempts=%d\n",
        __func__,
        tap,
        atid,
        tap->txa_tid,
        atid->tid,
        status,
        attempts);

    /* Note: This may update the BAW details */
    sc->sc_bar_response(ni, tap, status);

    /*
     * Unpause the TID.
     *
     * XXX if this is attempt=50, the TID will be downgraded
     * XXX to a non-aggregate session. So we must unpause the
     * XXX TID here or it'll never be done.
     *
     * Also, don't call it if bar_tx/bar_wait are 0; something
     * has beaten us to the punch? (XXX figure out what?)
     */
    if (status == 0 || attempts == 50) {
        ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
        if (atid->bar_tx == 0 || atid->bar_wait == 0)
            device_printf(sc->sc_dev,
                "%s: huh? bar_tx=%d, bar_wait=%d\n",
                __func__,
                atid->bar_tx, atid->bar_wait);
        else
            ath_tx_tid_bar_unsuspend(sc, atid);
        ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
    }
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
    struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: called; resuming\n", __func__);

    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
    atid->addba_tx_pending = 0;
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

    /* Note: This updates the aggregate state to (again) pending */
    sc->sc_addba_response_timeout(ni, tap);

    /* Unpause the TID, which reschedules it */
    ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
    ath_tx_tid_resume(sc, atid);
    ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

    ATH_NODE_LOCK_ASSERT(an);

    return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works. Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held. This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations. There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_tid *atid;
    struct ath_txq *txq;
    int tid;

    ATH_NODE_UNLOCK_ASSERT(an);

    /*
     * It's possible that a parallel call to ath_tx_node_wakeup()
     * will unpause these queues.
     *
     * The node lock can't just be grabbed here, as there's places
     * in the driver where the node lock is grabbed _within_ a
     * TXQ lock.
     * So, we do this delicately and unwind state if needed.
     *
     * + Pause all the queues
     * + Grab the node lock
     * + If the queue is already asleep, unpause and quit
     * + else just mark as asleep.
     *
     * A parallel sleep() call will just pause and then
     * find they're already paused, so undo it.
     *
     * A parallel wakeup() call will check if asleep is 1
     * and if it's not (ie, it's 0), it'll treat it as already
     * being awake. If it's 1, it'll mark it as 0 and then
     * unpause everything.
     *
     * (Talk about a delicate hack.)
     */

    /* Suspend all traffic on the node */
    for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
        atid = &an->an_tid[tid];
        txq = sc->sc_ac2q[atid->ac];

        ATH_TXQ_LOCK(txq);
        ath_tx_tid_pause(sc, atid);
        ATH_TXQ_UNLOCK(txq);
    }

    ATH_NODE_LOCK(an);

    /* In case of concurrency races from net80211.. */
    if (an->an_is_powersave == 1) {
        ATH_NODE_UNLOCK(an);
        device_printf(sc->sc_dev,
            "%s: an=%p: node was already asleep\n",
            __func__, an);
        for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
            atid = &an->an_tid[tid];
            txq = sc->sc_ac2q[atid->ac];

            ATH_TXQ_LOCK(txq);
            ath_tx_tid_resume(sc, atid);
            ATH_TXQ_UNLOCK(txq);
        }
        return;
    }

    /* Mark node as in powersaving */
    an->an_is_powersave = 1;

    ATH_NODE_UNLOCK(an);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_tid *atid;
    struct ath_txq *txq;
    int tid;

    ATH_NODE_UNLOCK_ASSERT(an);
    ATH_NODE_LOCK(an);

    /* In case of concurrency races from net80211.. */
    if (an->an_is_powersave == 0) {
        ATH_NODE_UNLOCK(an);
        device_printf(sc->sc_dev,
            "%s: an=%p: node was already awake\n",
            __func__, an);
        return;
    }

    /* Mark node as awake */
    an->an_is_powersave = 0;

    ATH_NODE_UNLOCK(an);

    for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
        atid = &an->an_tid[tid];
        txq = sc->sc_ac2q[atid->ac];

        ATH_TXQ_LOCK(txq);
        ath_tx_tid_resume(sc, atid);
        ATH_TXQ_UNLOCK(txq);
    }
}

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

    /* nothing new needed */
    return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

    /* nothing new needed */
    return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
    /*
     * For now, just set the descriptor length to sizeof(ath_desc);
     * worry about extracting the real length out of the HAL later.
     */
    sc->sc_tx_desclen = sizeof(struct ath_desc);
    sc->sc_tx_statuslen = 0;
    sc->sc_tx_nmaps = 1;    /* only one buffer per TX desc */

    sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
    sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
    sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

    sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
    sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

    sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
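
/*
 * Example of where the method table above is consumed (sketch
 * only, not compiled in; the caller shown here is hypothetical
 * rather than taken from this file): the attach path selects the
 * legacy (pre-EDMA) TX methods before descriptors are allocated,
 * then calls through the function pointers.
 */
#if 0
    /* during a hypothetical attach: */
    ath_xmit_setup_legacy(sc);
    error = sc->sc_tx.xmit_setup(sc);

    /* during a hypothetical detach: */
    sc->sc_tx.xmit_teardown(sc);
#endif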