1 /*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 #include "opt_wlan.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/sysctl.h> 47 #include <sys/mbuf.h> 48 #include <sys/malloc.h> 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/kernel.h> 52 #include <sys/socket.h> 53 #include <sys/sockio.h> 54 #include <sys/errno.h> 55 #include <sys/callout.h> 56 #include <sys/bus.h> 57 #include <sys/endian.h> 58 #include <sys/kthread.h> 59 #include <sys/taskqueue.h> 60 #include <sys/priv.h> 61 62 #include <machine/bus.h> 63 64 #include <net/if.h> 65 #include <net/if_dl.h> 66 #include <net/if_media.h> 67 #include <net/if_types.h> 68 #include <net/if_arp.h> 69 #include <net/ethernet.h> 70 #include <net/if_llc.h> 71 72 #include <net80211/ieee80211_var.h> 73 #include <net80211/ieee80211_regdomain.h> 74 #ifdef IEEE80211_SUPPORT_SUPERG 75 #include <net80211/ieee80211_superg.h> 76 #endif 77 #ifdef IEEE80211_SUPPORT_TDMA 78 #include <net80211/ieee80211_tdma.h> 79 #endif 80 #include <net80211/ieee80211_ht.h> 81 82 #include <net/bpf.h> 83 84 #ifdef INET 85 #include <netinet/in.h> 86 #include <netinet/if_ether.h> 87 #endif 88 89 #include <dev/ath/if_athvar.h> 90 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 91 #include <dev/ath/ath_hal/ah_diagcodes.h> 92 93 #include <dev/ath/if_ath_debug.h> 94 95 #ifdef ATH_TX99_DIAG 96 #include <dev/ath/ath_tx99/ath_tx99.h> 97 #endif 98 99 #include <dev/ath/if_ath_misc.h> 100 #include <dev/ath/if_ath_tx.h> 101 #include <dev/ath/if_ath_tx_ht.h> 102 103 /* 104 * How many retries to perform in software 105 */ 106 #define SWMAX_RETRIES 10 107 108 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, 109 int tid); 110 static int ath_tx_ampdu_running(struct ath_softc *sc, 
struct ath_node *an, 111 int tid); 112 static int ath_tx_action_frame_override_queue(struct ath_softc *sc, 113 struct ieee80211_node *ni, struct mbuf *m0, int *tid); 114 static int ath_tx_seqno_required(struct ath_softc *sc, 115 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); 116 117 /* 118 * Whether to use the 11n rate scenario functions or not 119 */ 120 static inline int 121 ath_tx_is_11n(struct ath_softc *sc) 122 { 123 return (sc->sc_ah->ah_magic == 0x20065416); 124 } 125 126 /* 127 * Obtain the current TID from the given frame. 128 * 129 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.) 130 * This has implications for which AC/priority the packet is placed 131 * in. 132 */ 133 static int 134 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) 135 { 136 const struct ieee80211_frame *wh; 137 int pri = M_WME_GETAC(m0); 138 139 wh = mtod(m0, const struct ieee80211_frame *); 140 if (! IEEE80211_QOS_HAS_SEQ(wh)) 141 return IEEE80211_NONQOS_TID; 142 else 143 return WME_AC_TO_TID(pri); 144 } 145 146 /* 147 * Determine what the correct AC queue for the given frame 148 * should be. 149 * 150 * This code assumes that the TIDs map consistently to 151 * the underlying hardware (or software) ath_txq. 152 * Since the sender may try to set an AC which is 153 * arbitrary, non-QoS TIDs may end up being put on 154 * completely different ACs. There's no way to put a 155 * TID into multiple ath_txq's for scheduling, so 156 * for now we override the AC/TXQ selection and set 157 * non-QOS TID frames into the BE queue. 158 * 159 * This may be completely incorrect - specifically, 160 * some management frames may end up out of order 161 * compared to the QoS traffic they're controlling. 162 * I'll look into this later. 163 */ 164 static int 165 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) 166 { 167 const struct ieee80211_frame *wh; 168 int pri = M_WME_GETAC(m0); 169 wh = mtod(m0, const struct ieee80211_frame *); 170 if (IEEE80211_QOS_HAS_SEQ(wh)) 171 return pri; 172 173 return WME_AC_BE; 174 } 175 176 void 177 ath_txfrag_cleanup(struct ath_softc *sc, 178 ath_bufhead *frags, struct ieee80211_node *ni) 179 { 180 struct ath_buf *bf, *next; 181 182 ATH_TXBUF_LOCK_ASSERT(sc); 183 184 TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { 185 /* NB: bf assumed clean */ 186 TAILQ_REMOVE(frags, bf, bf_list); 187 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 188 ieee80211_node_decref(ni); 189 } 190 } 191 192 /* 193 * Setup xmit of a fragmented frame. Allocate a buffer 194 * for each frag and bump the node reference count to 195 * reflect the held reference to be setup by ath_tx_start. 196 */ 197 int 198 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 199 struct mbuf *m0, struct ieee80211_node *ni) 200 { 201 struct mbuf *m; 202 struct ath_buf *bf; 203 204 ATH_TXBUF_LOCK(sc); 205 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 206 bf = _ath_getbuf_locked(sc); 207 if (bf == NULL) { /* out of buffers, cleanup */ 208 device_printf(sc->sc_dev, "%s: no buffer?\n", 209 __func__); 210 ath_txfrag_cleanup(sc, frags, ni); 211 break; 212 } 213 ieee80211_node_incref(ni); 214 TAILQ_INSERT_TAIL(frags, bf, bf_list); 215 } 216 ATH_TXBUF_UNLOCK(sc); 217 218 return !TAILQ_EMPTY(frags); 219 } 220 221 /* 222 * Reclaim mbuf resources. For fragmented frames we 223 * need to claim each frag chained with m_nextpkt. 
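 * Each fragment is a separate mbuf chain linked through m_nextpkt, so
 * a single m_freem() would only release the first fragment; the loop
 * below walks the m_nextpkt chain and frees each fragment in turn.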
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a non-11n frame.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fill in the remainder of the descriptor info.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
		bf->bf_lastds = ds;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Fill in the descriptor list for an aggregate subframe.
 *
 * The subframe is returned with the ds_link field in the last subframe
 * pointing to 0.
 */
static void
ath_tx_chaindesclist_subframe(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	ds0 = ds = bf->bf_desc;

	/*
	 * There's no need to call ath_hal_setupfirsttxdesc here;
	 * that's only going to occur for the first frame in an aggregate.
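	 * ath_tx_setds_11n() calls ath_hal_setupfirsttxdesc() once, on the
	 * first subframe's descriptor, after all subframes have been
	 * chained together (see below).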
348 */ 349 for (i = 0; i < bf->bf_nseg; i++, ds++) { 350 ds->ds_data = bf->bf_segs[i].ds_addr; 351 if (i == bf->bf_nseg - 1) 352 ds->ds_link = 0; 353 else 354 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); 355 356 /* 357 * This performs the setup for an aggregate frame. 358 * This includes enabling the aggregate flags if needed. 359 */ 360 ath_hal_chaintxdesc(ah, ds, 361 bf->bf_state.bfs_pktlen, 362 bf->bf_state.bfs_hdrlen, 363 HAL_PKT_TYPE_AMPDU, /* forces aggregate bits to be set */ 364 bf->bf_state.bfs_keyix, 365 0, /* cipher, calculated from keyix */ 366 bf->bf_state.bfs_ndelim, 367 bf->bf_segs[i].ds_len, /* segment length */ 368 i == 0, /* first segment */ 369 i == bf->bf_nseg - 1, /* last segment */ 370 bf->bf_next == NULL /* last sub-frame in aggr */ 371 ); 372 373 DPRINTF(sc, ATH_DEBUG_XMIT, 374 "%s: %d: %08x %08x %08x %08x %08x %08x\n", 375 __func__, i, ds->ds_link, ds->ds_data, 376 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); 377 bf->bf_lastds = ds; 378 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 379 BUS_DMASYNC_PREWRITE); 380 } 381 } 382 383 /* 384 * Setup segments+descriptors for an 11n aggregate. 385 * bf_first is the first buffer in the aggregate. 386 * The descriptor list must already been linked together using 387 * bf->bf_next. 388 */ 389 static void 390 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 391 { 392 struct ath_buf *bf, *bf_prev = NULL; 393 394 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 395 __func__, bf_first->bf_state.bfs_nframes, 396 bf_first->bf_state.bfs_al); 397 398 /* 399 * Setup all descriptors of all subframes. 400 */ 401 bf = bf_first; 402 while (bf != NULL) { 403 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 404 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 405 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 406 SEQNO(bf->bf_state.bfs_seqno)); 407 408 /* Sub-frame setup */ 409 ath_tx_chaindesclist_subframe(sc, bf); 410 411 /* 412 * Link the last descriptor of the previous frame 413 * to the beginning descriptor of this frame. 414 */ 415 if (bf_prev != NULL) 416 bf_prev->bf_lastds->ds_link = bf->bf_daddr; 417 418 /* Save a copy so we can link the next descriptor in */ 419 bf_prev = bf; 420 bf = bf->bf_next; 421 } 422 423 /* 424 * Setup first descriptor of first frame. 425 * chaintxdesc() overwrites the descriptor entries; 426 * setupfirsttxdesc() merges in things. 427 * Otherwise various fields aren't set correctly (eg flags). 428 */ 429 ath_hal_setupfirsttxdesc(sc->sc_ah, 430 bf_first->bf_desc, 431 bf_first->bf_state.bfs_al, 432 bf_first->bf_state.bfs_txflags | HAL_TXDESC_INTREQ, 433 bf_first->bf_state.bfs_txpower, 434 bf_first->bf_state.bfs_txrate0, 435 bf_first->bf_state.bfs_try0, 436 bf_first->bf_state.bfs_txantenna, 437 bf_first->bf_state.bfs_ctsrate, 438 bf_first->bf_state.bfs_ctsduration); 439 440 /* 441 * Setup the last descriptor in the list. 442 * bf_prev points to the last; bf is NULL here. 443 */ 444 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_desc, 445 bf_first->bf_desc); 446 447 /* 448 * Set the first descriptor bf_lastds field to point to 449 * the last descriptor in the last subframe, that's where 450 * the status update will occur. 451 */ 452 bf_first->bf_lastds = bf_prev->bf_lastds; 453 454 /* 455 * And bf_last in the first descriptor points to the end of 456 * the aggregate list. 
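	 * (bf_lastds tracks the last hardware descriptor, used for ds_link
	 * chaining and for the TX status update; bf_last tracks the last
	 * ath_buf in the aggregate.)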
	 */
	bf_first->bf_last = bf_prev;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TXQ_LOCK_ASSERT(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	if (txq->axq_link != NULL) {
		struct ath_buf *last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		*txq->axq_link = bf->bf_daddr;
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	txq->axq_link = &bf->bf_lastds->ds_link;
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK_ASSERT(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		/*
		 * There's no need to update axq_link; the hardware
		 * is in reset and once the reset is complete, any
		 * non-empty queues will simply have DMA restarted.
		 */
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	/* For now, so as not to generate whitespace diffs */
	if (1) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
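			 * The deferred write is caught up below: the
			 * ATH_TXQ_PUTPENDING flag is set here and cleared
			 * once a later handoff finds the queue idle and
			 * writes the address of the buffer at the head of
			 * axq_q to TXDP.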
557 */ 558 if (!qbusy) { 559 ath_hal_puttxbuf(ah, txq->axq_qnum, 560 bf->bf_daddr); 561 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 562 DPRINTF(sc, ATH_DEBUG_XMIT, 563 "%s: TXDP[%u] = %p (%p) depth %d\n", 564 __func__, txq->axq_qnum, 565 (caddr_t)bf->bf_daddr, bf->bf_desc, 566 txq->axq_depth); 567 } else { 568 txq->axq_flags |= ATH_TXQ_PUTPENDING; 569 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT, 570 "%s: Q%u busy, defer enable\n", __func__, 571 txq->axq_qnum); 572 } 573 } else { 574 *txq->axq_link = bf->bf_daddr; 575 DPRINTF(sc, ATH_DEBUG_XMIT, 576 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 577 txq->axq_qnum, txq->axq_link, 578 (caddr_t)bf->bf_daddr, bf->bf_desc, 579 txq->axq_depth); 580 if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) { 581 /* 582 * The q was busy when we previously tried 583 * to write the address of the first buffer 584 * in the chain. Since it's not busy now 585 * handle this chore. We are certain the 586 * buffer at the front is the right one since 587 * axq_link is NULL only when the buffer list 588 * is/was empty. 589 */ 590 ath_hal_puttxbuf(ah, txq->axq_qnum, 591 TAILQ_FIRST(&txq->axq_q)->bf_daddr); 592 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 593 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT, 594 "%s: Q%u restarted\n", __func__, 595 txq->axq_qnum); 596 } 597 } 598 #else 599 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 600 if (txq->axq_link == NULL) { 601 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 602 DPRINTF(sc, ATH_DEBUG_XMIT, 603 "%s: TXDP[%u] = %p (%p) depth %d\n", 604 __func__, txq->axq_qnum, 605 (caddr_t)bf->bf_daddr, bf->bf_desc, 606 txq->axq_depth); 607 } else { 608 *txq->axq_link = bf->bf_daddr; 609 DPRINTF(sc, ATH_DEBUG_XMIT, 610 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 611 txq->axq_qnum, txq->axq_link, 612 (caddr_t)bf->bf_daddr, bf->bf_desc, 613 txq->axq_depth); 614 } 615 #endif /* IEEE80211_SUPPORT_TDMA */ 616 if (bf->bf_state.bfs_aggr) 617 txq->axq_aggr_depth++; 618 txq->axq_link = &bf->bf_lastds->ds_link; 619 ath_hal_txstart(ah, txq->axq_qnum); 620 } 621 } 622 623 /* 624 * Restart TX DMA for the given TXQ. 625 * 626 * This must be called whether the queue is empty or not. 627 */ 628 void 629 ath_txq_restart_dma(struct ath_softc *sc, struct ath_txq *txq) 630 { 631 struct ath_hal *ah = sc->sc_ah; 632 struct ath_buf *bf, *bf_last; 633 634 ATH_TXQ_LOCK_ASSERT(txq); 635 636 /* This is always going to be cleared, empty or not */ 637 txq->axq_flags &= ~ATH_TXQ_PUTPENDING; 638 639 /* XXX make this ATH_TXQ_FIRST */ 640 bf = TAILQ_FIRST(&txq->axq_q); 641 bf_last = ATH_TXQ_LAST(txq, axq_q_s); 642 643 if (bf == NULL) 644 return; 645 646 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 647 txq->axq_link = &bf_last->bf_lastds->ds_link; 648 ath_hal_txstart(ah, txq->axq_qnum); 649 } 650 651 /* 652 * Hand off a packet to the hardware (or mcast queue.) 653 * 654 * The relevant hardware txq should be locked. 
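 * (Both ath_tx_handoff_mcast() and ath_tx_handoff_hw() assert the
 * TXQ lock via ATH_TXQ_LOCK_ASSERT(), so the caller must hold it
 * across the call.)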
655 */ 656 static void 657 ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) 658 { 659 ATH_TXQ_LOCK_ASSERT(txq); 660 661 if (txq->axq_qnum == ATH_TXQ_SWQ) 662 ath_tx_handoff_mcast(sc, txq, bf); 663 else 664 ath_tx_handoff_hw(sc, txq, bf); 665 } 666 667 static int 668 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 669 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 670 int *keyix) 671 { 672 DPRINTF(sc, ATH_DEBUG_XMIT, 673 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 674 __func__, 675 *hdrlen, 676 *pktlen, 677 isfrag, 678 iswep, 679 m0); 680 681 if (iswep) { 682 const struct ieee80211_cipher *cip; 683 struct ieee80211_key *k; 684 685 /* 686 * Construct the 802.11 header+trailer for an encrypted 687 * frame. The only reason this can fail is because of an 688 * unknown or unsupported cipher/key type. 689 */ 690 k = ieee80211_crypto_encap(ni, m0); 691 if (k == NULL) { 692 /* 693 * This can happen when the key is yanked after the 694 * frame was queued. Just discard the frame; the 695 * 802.11 layer counts failures and provides 696 * debugging/diagnostics. 697 */ 698 return (0); 699 } 700 /* 701 * Adjust the packet + header lengths for the crypto 702 * additions and calculate the h/w key index. When 703 * a s/w mic is done the frame will have had any mic 704 * added to it prior to entry so m0->m_pkthdr.len will 705 * account for it. Otherwise we need to add it to the 706 * packet length. 707 */ 708 cip = k->wk_cipher; 709 (*hdrlen) += cip->ic_header; 710 (*pktlen) += cip->ic_header + cip->ic_trailer; 711 /* NB: frags always have any TKIP MIC done in s/w */ 712 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 713 (*pktlen) += cip->ic_miclen; 714 (*keyix) = k->wk_keyix; 715 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 716 /* 717 * Use station key cache slot, if assigned. 718 */ 719 (*keyix) = ni->ni_ucastkey.wk_keyix; 720 if ((*keyix) == IEEE80211_KEYIX_NONE) 721 (*keyix) = HAL_TXKEYIX_INVALID; 722 } else 723 (*keyix) = HAL_TXKEYIX_INVALID; 724 725 return (1); 726 } 727 728 /* 729 * Calculate whether interoperability protection is required for 730 * this frame. 731 * 732 * This requires the rate control information be filled in, 733 * as the protection requirement depends upon the current 734 * operating mode / PHY. 735 */ 736 static void 737 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 738 { 739 struct ieee80211_frame *wh; 740 uint8_t rix; 741 uint16_t flags; 742 int shortPreamble; 743 const HAL_RATE_TABLE *rt = sc->sc_currates; 744 struct ifnet *ifp = sc->sc_ifp; 745 struct ieee80211com *ic = ifp->if_l2com; 746 747 flags = bf->bf_state.bfs_txflags; 748 rix = bf->bf_state.bfs_rc[0].rix; 749 shortPreamble = bf->bf_state.bfs_shpream; 750 wh = mtod(bf->bf_m, struct ieee80211_frame *); 751 752 /* 753 * If 802.11g protection is enabled, determine whether 754 * to use RTS/CTS or just CTS. Note that this is only 755 * done for OFDM unicast frames. 756 */ 757 if ((ic->ic_flags & IEEE80211_F_USEPROT) && 758 rt->info[rix].phy == IEEE80211_T_OFDM && 759 (flags & HAL_TXDESC_NOACK) == 0) { 760 bf->bf_state.bfs_doprot = 1; 761 /* XXX fragments must use CCK rates w/ protection */ 762 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 763 flags |= HAL_TXDESC_RTSENA; 764 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 765 flags |= HAL_TXDESC_CTSENA; 766 } 767 /* 768 * For frags it would be desirable to use the 769 * highest CCK rate for RTS/CTS. 
But stations 770 * farther away may detect it at a lower CCK rate 771 * so use the configured protection rate instead 772 * (for now). 773 */ 774 sc->sc_stats.ast_tx_protect++; 775 } 776 777 /* 778 * If 11n protection is enabled and it's a HT frame, 779 * enable RTS. 780 * 781 * XXX ic_htprotmode or ic_curhtprotmode? 782 * XXX should it_htprotmode only matter if ic_curhtprotmode 783 * XXX indicates it's not a HT pure environment? 784 */ 785 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 786 rt->info[rix].phy == IEEE80211_T_HT && 787 (flags & HAL_TXDESC_NOACK) == 0) { 788 flags |= HAL_TXDESC_RTSENA; 789 sc->sc_stats.ast_tx_htprotect++; 790 } 791 bf->bf_state.bfs_txflags = flags; 792 } 793 794 /* 795 * Update the frame duration given the currently selected rate. 796 * 797 * This also updates the frame duration value, so it will require 798 * a DMA flush. 799 */ 800 static void 801 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 802 { 803 struct ieee80211_frame *wh; 804 uint8_t rix; 805 uint16_t flags; 806 int shortPreamble; 807 struct ath_hal *ah = sc->sc_ah; 808 const HAL_RATE_TABLE *rt = sc->sc_currates; 809 int isfrag = bf->bf_m->m_flags & M_FRAG; 810 811 flags = bf->bf_state.bfs_txflags; 812 rix = bf->bf_state.bfs_rc[0].rix; 813 shortPreamble = bf->bf_state.bfs_shpream; 814 wh = mtod(bf->bf_m, struct ieee80211_frame *); 815 816 /* 817 * Calculate duration. This logically belongs in the 802.11 818 * layer but it lacks sufficient information to calculate it. 819 */ 820 if ((flags & HAL_TXDESC_NOACK) == 0 && 821 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 822 u_int16_t dur; 823 if (shortPreamble) 824 dur = rt->info[rix].spAckDuration; 825 else 826 dur = rt->info[rix].lpAckDuration; 827 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 828 dur += dur; /* additional SIFS+ACK */ 829 KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment")); 830 /* 831 * Include the size of next fragment so NAV is 832 * updated properly. The last fragment uses only 833 * the ACK duration 834 */ 835 dur += ath_hal_computetxtime(ah, rt, 836 bf->bf_m->m_nextpkt->m_pkthdr.len, 837 rix, shortPreamble); 838 } 839 if (isfrag) { 840 /* 841 * Force hardware to use computed duration for next 842 * fragment by disabling multi-rate retry which updates 843 * duration based on the multi-rate duration table. 844 */ 845 bf->bf_state.bfs_ismrr = 0; 846 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 847 /* XXX update bfs_rc[0].try? */ 848 } 849 850 /* Update the duration field itself */ 851 *(u_int16_t *)wh->i_dur = htole16(dur); 852 } 853 } 854 855 static uint8_t 856 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 857 int cix, int shortPreamble) 858 { 859 uint8_t ctsrate; 860 861 /* 862 * CTS transmit rate is derived from the transmit rate 863 * by looking in the h/w rate table. We must also factor 864 * in whether or not a short preamble is to be used. 865 */ 866 /* NB: cix is set above where RTS/CTS is enabled */ 867 KASSERT(cix != 0xff, ("cix not setup")); 868 ctsrate = rt->info[cix].rateCode; 869 870 /* XXX this should only matter for legacy rates */ 871 if (shortPreamble) 872 ctsrate |= rt->info[cix].shortPreamble; 873 874 return (ctsrate); 875 } 876 877 /* 878 * Calculate the RTS/CTS duration for legacy frames. 
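 *
 * Roughly:
 *   ctsduration = [SIFS + CTS, if RTS is enabled]
 *               + TXTIME(frame at the data rate)
 *               + [SIFS + ACK, unless HAL_TXDESC_NOACK]
 * where the CTS and ACK terms reuse the precalculated spAckDuration /
 * lpAckDuration entries, since a CTS is assumed to be the same size
 * as an ACK.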
879 */ 880 static int 881 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 882 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 883 int flags) 884 { 885 int ctsduration = 0; 886 887 /* This mustn't be called for HT modes */ 888 if (rt->info[cix].phy == IEEE80211_T_HT) { 889 printf("%s: HT rate where it shouldn't be (0x%x)\n", 890 __func__, rt->info[cix].rateCode); 891 return (-1); 892 } 893 894 /* 895 * Compute the transmit duration based on the frame 896 * size and the size of an ACK frame. We call into the 897 * HAL to do the computation since it depends on the 898 * characteristics of the actual PHY being used. 899 * 900 * NB: CTS is assumed the same size as an ACK so we can 901 * use the precalculated ACK durations. 902 */ 903 if (shortPreamble) { 904 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 905 ctsduration += rt->info[cix].spAckDuration; 906 ctsduration += ath_hal_computetxtime(ah, 907 rt, pktlen, rix, AH_TRUE); 908 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 909 ctsduration += rt->info[rix].spAckDuration; 910 } else { 911 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 912 ctsduration += rt->info[cix].lpAckDuration; 913 ctsduration += ath_hal_computetxtime(ah, 914 rt, pktlen, rix, AH_FALSE); 915 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 916 ctsduration += rt->info[rix].lpAckDuration; 917 } 918 919 return (ctsduration); 920 } 921 922 /* 923 * Update the given ath_buf with updated rts/cts setup and duration 924 * values. 925 * 926 * To support rate lookups for each software retry, the rts/cts rate 927 * and cts duration must be re-calculated. 928 * 929 * This function assumes the RTS/CTS flags have been set as needed; 930 * mrr has been disabled; and the rate control lookup has been done. 931 * 932 * XXX TODO: MRR need only be disabled for the pre-11n NICs. 933 * XXX The 11n NICs support per-rate RTS/CTS configuration. 934 */ 935 static void 936 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 937 { 938 uint16_t ctsduration = 0; 939 uint8_t ctsrate = 0; 940 uint8_t rix = bf->bf_state.bfs_rc[0].rix; 941 uint8_t cix = 0; 942 const HAL_RATE_TABLE *rt = sc->sc_currates; 943 944 /* 945 * No RTS/CTS enabled? Don't bother. 946 */ 947 if ((bf->bf_state.bfs_txflags & 948 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 949 /* XXX is this really needed? */ 950 bf->bf_state.bfs_ctsrate = 0; 951 bf->bf_state.bfs_ctsduration = 0; 952 return; 953 } 954 955 /* 956 * If protection is enabled, use the protection rix control 957 * rate. Otherwise use the rate0 control rate. 958 */ 959 if (bf->bf_state.bfs_doprot) 960 rix = sc->sc_protrix; 961 else 962 rix = bf->bf_state.bfs_rc[0].rix; 963 964 /* 965 * If the raw path has hard-coded ctsrate0 to something, 966 * use it. 967 */ 968 if (bf->bf_state.bfs_ctsrate0 != 0) 969 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 970 else 971 /* Control rate from above */ 972 cix = rt->info[rix].controlRate; 973 974 /* Calculate the rtscts rate for the given cix */ 975 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 976 bf->bf_state.bfs_shpream); 977 978 /* The 11n chipsets do ctsduration calculations for you */ 979 if (! ath_tx_is_11n(sc)) 980 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 981 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 982 rt, bf->bf_state.bfs_txflags); 983 984 /* Squirrel away in ath_buf */ 985 bf->bf_state.bfs_ctsrate = ctsrate; 986 bf->bf_state.bfs_ctsduration = ctsduration; 987 988 /* 989 * Must disable multi-rate retry when using RTS/CTS. 
	 * XXX TODO: only for pre-11n NICs.
	 */
	bf->bf_state.bfs_ismrr = 0;
	bf->bf_state.bfs_try0 =
	    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* XXX TODO: Setup descriptor chain */
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips.  These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it.  The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
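 *
 * If MRR is disabled, ath_tx_set_ratectrl() simply zeroes the try
 * counts for series 1..3 below, so only bfs_rc[0] (series 0) ends up
 * being used.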
1089 */ 1090 static void 1091 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 1092 struct ath_buf *bf) 1093 { 1094 struct ath_rc_series *rc = bf->bf_state.bfs_rc; 1095 1096 /* If mrr is disabled, blank tries 1, 2, 3 */ 1097 if (! bf->bf_state.bfs_ismrr) 1098 rc[1].tries = rc[2].tries = rc[3].tries = 0; 1099 1100 /* 1101 * Always call - that way a retried descriptor will 1102 * have the MRR fields overwritten. 1103 * 1104 * XXX TODO: see if this is really needed - setting up 1105 * the first descriptor should set the MRR fields to 0 1106 * for us anyway. 1107 */ 1108 if (ath_tx_is_11n(sc)) { 1109 ath_buf_set_rate(sc, ni, bf); 1110 } else { 1111 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 1112 , rc[1].ratecode, rc[1].tries 1113 , rc[2].ratecode, rc[2].tries 1114 , rc[3].ratecode, rc[3].tries 1115 ); 1116 } 1117 } 1118 1119 /* 1120 * Transmit the given frame to the hardware. 1121 * 1122 * The frame must already be setup; rate control must already have 1123 * been done. 1124 * 1125 * XXX since the TXQ lock is being held here (and I dislike holding 1126 * it for this long when not doing software aggregation), later on 1127 * break this function into "setup_normal" and "xmit_normal". The 1128 * lock only needs to be held for the ath_tx_handoff call. 1129 */ 1130 static void 1131 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, 1132 struct ath_buf *bf) 1133 { 1134 1135 ATH_TXQ_LOCK_ASSERT(txq); 1136 1137 /* Setup the descriptor before handoff */ 1138 ath_tx_do_ratelookup(sc, bf); 1139 ath_tx_calc_duration(sc, bf); 1140 ath_tx_calc_protection(sc, bf); 1141 ath_tx_set_rtscts(sc, bf); 1142 ath_tx_rate_fill_rcflags(sc, bf); 1143 ath_tx_setds(sc, bf); 1144 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1145 ath_tx_chaindesclist(sc, bf); 1146 1147 /* Hand off to hardware */ 1148 ath_tx_handoff(sc, txq, bf); 1149 } 1150 1151 1152 1153 static int 1154 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, 1155 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) 1156 { 1157 struct ieee80211vap *vap = ni->ni_vap; 1158 struct ath_hal *ah = sc->sc_ah; 1159 struct ifnet *ifp = sc->sc_ifp; 1160 struct ieee80211com *ic = ifp->if_l2com; 1161 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; 1162 int error, iswep, ismcast, isfrag, ismrr; 1163 int keyix, hdrlen, pktlen, try0 = 0; 1164 u_int8_t rix = 0, txrate = 0; 1165 struct ath_desc *ds; 1166 struct ieee80211_frame *wh; 1167 u_int subtype, flags; 1168 HAL_PKT_TYPE atype; 1169 const HAL_RATE_TABLE *rt; 1170 HAL_BOOL shortPreamble; 1171 struct ath_node *an; 1172 u_int pri; 1173 1174 wh = mtod(m0, struct ieee80211_frame *); 1175 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; 1176 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1177 isfrag = m0->m_flags & M_FRAG; 1178 hdrlen = ieee80211_anyhdrsize(wh); 1179 /* 1180 * Packet length must not include any 1181 * pad bytes; deduct them here. 1182 */ 1183 pktlen = m0->m_pkthdr.len - (hdrlen & 3); 1184 1185 /* Handle encryption twiddling if needed */ 1186 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, 1187 &pktlen, &keyix)) { 1188 ath_freetx(m0); 1189 return EIO; 1190 } 1191 1192 /* packet header may have moved, reset our local pointer */ 1193 wh = mtod(m0, struct ieee80211_frame *); 1194 1195 pktlen += IEEE80211_CRC_LEN; 1196 1197 /* 1198 * Load the DMA map so any coalescing is done. This 1199 * also calculates the number of descriptors we need. 
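	 * Note that ath_tx_dmasetup() may replace the mbuf (via m_collapse)
	 * if the chain needs too many descriptors, which is why m0 and wh
	 * are reloaded from bf->bf_m below.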
1200 */ 1201 error = ath_tx_dmasetup(sc, bf, m0); 1202 if (error != 0) 1203 return error; 1204 bf->bf_node = ni; /* NB: held reference */ 1205 m0 = bf->bf_m; /* NB: may have changed */ 1206 wh = mtod(m0, struct ieee80211_frame *); 1207 1208 /* setup descriptors */ 1209 ds = bf->bf_desc; 1210 rt = sc->sc_currates; 1211 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1212 1213 /* 1214 * NB: the 802.11 layer marks whether or not we should 1215 * use short preamble based on the current mode and 1216 * negotiated parameters. 1217 */ 1218 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && 1219 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { 1220 shortPreamble = AH_TRUE; 1221 sc->sc_stats.ast_tx_shortpre++; 1222 } else { 1223 shortPreamble = AH_FALSE; 1224 } 1225 1226 an = ATH_NODE(ni); 1227 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1228 ismrr = 0; /* default no multi-rate retry*/ 1229 pri = M_WME_GETAC(m0); /* honor classification */ 1230 /* XXX use txparams instead of fixed values */ 1231 /* 1232 * Calculate Atheros packet type from IEEE80211 packet header, 1233 * setup for rate calculations, and select h/w transmit queue. 1234 */ 1235 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { 1236 case IEEE80211_FC0_TYPE_MGT: 1237 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 1238 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) 1239 atype = HAL_PKT_TYPE_BEACON; 1240 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1241 atype = HAL_PKT_TYPE_PROBE_RESP; 1242 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) 1243 atype = HAL_PKT_TYPE_ATIM; 1244 else 1245 atype = HAL_PKT_TYPE_NORMAL; /* XXX */ 1246 rix = an->an_mgmtrix; 1247 txrate = rt->info[rix].rateCode; 1248 if (shortPreamble) 1249 txrate |= rt->info[rix].shortPreamble; 1250 try0 = ATH_TXMGTTRY; 1251 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1252 break; 1253 case IEEE80211_FC0_TYPE_CTL: 1254 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ 1255 rix = an->an_mgmtrix; 1256 txrate = rt->info[rix].rateCode; 1257 if (shortPreamble) 1258 txrate |= rt->info[rix].shortPreamble; 1259 try0 = ATH_TXMGTTRY; 1260 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1261 break; 1262 case IEEE80211_FC0_TYPE_DATA: 1263 atype = HAL_PKT_TYPE_NORMAL; /* default */ 1264 /* 1265 * Data frames: multicast frames go out at a fixed rate, 1266 * EAPOL frames use the mgmt frame rate; otherwise consult 1267 * the rate control module for the rate to use. 1268 */ 1269 if (ismcast) { 1270 rix = an->an_mcastrix; 1271 txrate = rt->info[rix].rateCode; 1272 if (shortPreamble) 1273 txrate |= rt->info[rix].shortPreamble; 1274 try0 = 1; 1275 } else if (m0->m_flags & M_EAPOL) { 1276 /* XXX? maybe always use long preamble? */ 1277 rix = an->an_mgmtrix; 1278 txrate = rt->info[rix].rateCode; 1279 if (shortPreamble) 1280 txrate |= rt->info[rix].shortPreamble; 1281 try0 = ATH_TXMAXTRY; /* XXX?too many? */ 1282 } else { 1283 /* 1284 * Do rate lookup on each TX, rather than using 1285 * the hard-coded TX information decided here. 1286 */ 1287 ismrr = 1; 1288 bf->bf_state.bfs_doratelookup = 1; 1289 } 1290 if (cap->cap_wmeParams[pri].wmep_noackPolicy) 1291 flags |= HAL_TXDESC_NOACK; 1292 break; 1293 default: 1294 if_printf(ifp, "bogus frame type 0x%x (%s)\n", 1295 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); 1296 /* XXX statistic */ 1297 ath_freetx(m0); 1298 return EIO; 1299 } 1300 1301 /* Check if the TXQ wouldn't match what the hardware TXQ is! 
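	 * This is purely diagnostic: a mismatch is logged below, but the
	 * frame is still sent on the txq that was passed in.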
	 */
	if (txq != sc->sc_ac2q[pri]) {
		device_printf(sc->sc_dev,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done too aggressively can cause senders to
	 * back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
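	 * Series 0 (bfs_rc[0]) is therefore populated below with the same
	 * rix/tries/ratecode that were chosen for txrate0/try0 above.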
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ni->ni_txpower;
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}

/*
 * Direct-dispatch the current frame to the hardware.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so it's serialised?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	//ieee80211_seq seqno;
	uint8_t type, subtype;

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be.  If it's a non-QoS frame, the
	 * AC and TID are overridden.  The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);

	txq = sc->sc_ac2q[pri];
	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/*
	 * Enforce how deep the multicast queue can grow.
	 *
	 * XXX duplicated in ath_raw_xmit().
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ATH_TXQ_LOCK(sc->sc_cabq);

		if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) {
			sc->sc_stats.ast_tx_mcastq_overflow++;
			r = ENOBUFS;
		}

		ATH_TXQ_UNLOCK(sc->sc_cabq);

		if (r != 0) {
			m_freem(m0);
			return r;
		}
	}

	/* A-MPDU TX */
	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
	is_ampdu = is_ampdu_tx | is_ampdu_pending;

	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: bf=%p, tid=%d, ac=%d, is_ampdu=%d\n",
	    __func__, bf, tid, pri, is_ampdu);

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast frames
	 * must be buffered until after the beacon.
1500 * 1501 * TODO: we should lock the mcastq before we check the length. 1502 */ 1503 if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) 1504 txq = &avp->av_mcastq; 1505 1506 /* Do the generic frame setup */ 1507 /* XXX should just bzero the bf_state? */ 1508 bf->bf_state.bfs_dobaw = 0; 1509 bf->bf_state.bfs_seqno_assigned = 0; 1510 bf->bf_state.bfs_need_seqno = 0; 1511 bf->bf_state.bfs_seqno = -1; /* XXX debugging */ 1512 1513 /* A-MPDU TX? Manually set sequence number */ 1514 /* Don't do it whilst pending; the net80211 layer still assigns them */ 1515 /* XXX do we need locking here? */ 1516 if (is_ampdu_tx) { 1517 ATH_TXQ_LOCK(txq); 1518 /* 1519 * Always call; this function will 1520 * handle making sure that null data frames 1521 * don't get a sequence number from the current 1522 * TID and thus mess with the BAW. 1523 */ 1524 //seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); 1525 if (ath_tx_seqno_required(sc, ni, bf, m0)) { 1526 bf->bf_state.bfs_dobaw = 1; 1527 bf->bf_state.bfs_need_seqno = 1; 1528 } 1529 ATH_TXQ_UNLOCK(txq); 1530 } else { 1531 /* No AMPDU TX, we've been assigned a sequence number. */ 1532 if (IEEE80211_QOS_HAS_SEQ(wh)) { 1533 bf->bf_state.bfs_seqno_assigned = 1; 1534 /* XXX we should store the frag+seqno in bfs_seqno */ 1535 bf->bf_state.bfs_seqno = 1536 M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 1537 } 1538 } 1539 1540 /* 1541 * If needed, the sequence number has been assigned. 1542 * Squirrel it away somewhere easy to get to. 1543 */ 1544 //bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; 1545 1546 /* Is ampdu pending? fetch the seqno and print it out */ 1547 if (is_ampdu_pending) 1548 DPRINTF(sc, ATH_DEBUG_SW_TX, 1549 "%s: tid %d: ampdu pending, seqno %d\n", 1550 __func__, tid, M_SEQNO_GET(m0)); 1551 1552 /* This also sets up the DMA map */ 1553 r = ath_tx_normal_setup(sc, ni, bf, m0, txq); 1554 1555 if (r != 0) 1556 return r; 1557 1558 /* At this point m0 could have changed! */ 1559 m0 = bf->bf_m; 1560 1561 DPRINTF(sc, ATH_DEBUG_SW_TX, 1562 "%s: DONE: bf=%p, tid=%d, ac=%d, is_ampdu=%d, dobaw=%d, seqno=%d\n", 1563 __func__, bf, tid, pri, is_ampdu, bf->bf_state.bfs_dobaw, M_SEQNO_GET(m0)); 1564 1565 #if 1 1566 /* 1567 * If it's a multicast frame, do a direct-dispatch to the 1568 * destination hardware queue. Don't bother software 1569 * queuing it. 1570 */ 1571 /* 1572 * If it's a BAR frame, do a direct dispatch to the 1573 * destination hardware queue. Don't bother software 1574 * queuing it, as the TID will now be paused. 1575 * Sending a BAR frame can occur from the net80211 txa timer 1576 * (ie, retries) or from the ath txtask (completion call.) 1577 * It queues directly to hardware because the TID is paused 1578 * at this point (and won't be unpaused until the BAR has 1579 * either been TXed successfully or max retries has been 1580 * reached.) 
1581 */ 1582 if (txq == &avp->av_mcastq) { 1583 DPRINTF(sc, ATH_DEBUG_SW_TX, 1584 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); 1585 ATH_TXQ_LOCK(txq); 1586 ath_tx_xmit_normal(sc, txq, bf); 1587 ATH_TXQ_UNLOCK(txq); 1588 } else if (type == IEEE80211_FC0_TYPE_CTL && 1589 subtype == IEEE80211_FC0_SUBTYPE_BAR) { 1590 DPRINTF(sc, ATH_DEBUG_SW_TX, 1591 "%s: BAR: TX'ing direct\n", __func__); 1592 ATH_TXQ_LOCK(txq); 1593 ath_tx_xmit_normal(sc, txq, bf); 1594 ATH_TXQ_UNLOCK(txq); 1595 } else { 1596 /* add to software queue */ 1597 DPRINTF(sc, ATH_DEBUG_SW_TX, 1598 "%s: bf=%p: swq: TX'ing\n", __func__, bf); 1599 ath_tx_swq(sc, ni, txq, bf); 1600 } 1601 #else 1602 /* 1603 * For now, since there's no software queue, 1604 * direct-dispatch to the hardware. 1605 */ 1606 ATH_TXQ_LOCK(txq); 1607 ath_tx_xmit_normal(sc, txq, bf); 1608 ATH_TXQ_UNLOCK(txq); 1609 #endif 1610 1611 return 0; 1612 } 1613 1614 static int 1615 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 1616 struct ath_buf *bf, struct mbuf *m0, 1617 const struct ieee80211_bpf_params *params) 1618 { 1619 struct ifnet *ifp = sc->sc_ifp; 1620 struct ieee80211com *ic = ifp->if_l2com; 1621 struct ath_hal *ah = sc->sc_ah; 1622 struct ieee80211vap *vap = ni->ni_vap; 1623 int error, ismcast, ismrr; 1624 int keyix, hdrlen, pktlen, try0, txantenna; 1625 u_int8_t rix, txrate; 1626 struct ieee80211_frame *wh; 1627 u_int flags; 1628 HAL_PKT_TYPE atype; 1629 const HAL_RATE_TABLE *rt; 1630 struct ath_desc *ds; 1631 u_int pri; 1632 int o_tid = -1; 1633 int do_override; 1634 1635 wh = mtod(m0, struct ieee80211_frame *); 1636 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 1637 hdrlen = ieee80211_anyhdrsize(wh); 1638 /* 1639 * Packet length must not include any 1640 * pad bytes; deduct them here. 1641 */ 1642 /* XXX honor IEEE80211_BPF_DATAPAD */ 1643 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 1644 1645 1646 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 1647 __func__, ismcast); 1648 1649 /* Handle encryption twiddling if needed */ 1650 if (! ath_tx_tag_crypto(sc, ni, 1651 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 1652 &hdrlen, &pktlen, &keyix)) { 1653 ath_freetx(m0); 1654 return EIO; 1655 } 1656 /* packet header may have moved, reset our local pointer */ 1657 wh = mtod(m0, struct ieee80211_frame *); 1658 1659 /* Do the generic frame setup */ 1660 /* XXX should just bzero the bf_state? */ 1661 bf->bf_state.bfs_dobaw = 0; 1662 1663 error = ath_tx_dmasetup(sc, bf, m0); 1664 if (error != 0) 1665 return error; 1666 m0 = bf->bf_m; /* NB: may have changed */ 1667 wh = mtod(m0, struct ieee80211_frame *); 1668 bf->bf_node = ni; /* NB: held reference */ 1669 1670 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 1671 flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 1672 if (params->ibp_flags & IEEE80211_BPF_RTS) 1673 flags |= HAL_TXDESC_RTSENA; 1674 else if (params->ibp_flags & IEEE80211_BPF_CTS) { 1675 /* XXX assume 11g/11n protection? */ 1676 bf->bf_state.bfs_doprot = 1; 1677 flags |= HAL_TXDESC_CTSENA; 1678 } 1679 /* XXX leave ismcast to injector? 
*/ 1680 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 1681 flags |= HAL_TXDESC_NOACK; 1682 1683 rt = sc->sc_currates; 1684 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 1685 rix = ath_tx_findrix(sc, params->ibp_rate0); 1686 txrate = rt->info[rix].rateCode; 1687 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 1688 txrate |= rt->info[rix].shortPreamble; 1689 sc->sc_txrix = rix; 1690 try0 = params->ibp_try0; 1691 ismrr = (params->ibp_try1 != 0); 1692 txantenna = params->ibp_pri >> 2; 1693 if (txantenna == 0) /* XXX? */ 1694 txantenna = sc->sc_txantenna; 1695 1696 /* 1697 * Since ctsrate is fixed, store it away for later 1698 * use when the descriptor fields are being set. 1699 */ 1700 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 1701 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 1702 1703 pri = params->ibp_pri & 3; 1704 /* Override pri if the frame isn't a QoS one */ 1705 if (! IEEE80211_QOS_HAS_SEQ(wh)) 1706 pri = ath_tx_getac(sc, m0); 1707 1708 /* 1709 * NB: we mark all packets as type PSPOLL so the h/w won't 1710 * set the sequence number, duration, etc. 1711 */ 1712 atype = HAL_PKT_TYPE_PSPOLL; 1713 1714 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 1715 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 1716 sc->sc_hwmap[rix].ieeerate, -1); 1717 1718 if (ieee80211_radiotap_active_vap(vap)) { 1719 u_int64_t tsf = ath_hal_gettsf64(ah); 1720 1721 sc->sc_tx_th.wt_tsf = htole64(tsf); 1722 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 1723 if (wh->i_fc[1] & IEEE80211_FC1_WEP) 1724 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 1725 if (m0->m_flags & M_FRAG) 1726 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 1727 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 1728 sc->sc_tx_th.wt_txpower = ni->ni_txpower; 1729 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 1730 1731 ieee80211_radiotap_tx(vap, m0); 1732 } 1733 1734 /* 1735 * Formulate first tx descriptor with tx controls. 1736 */ 1737 ds = bf->bf_desc; 1738 /* XXX check return value? */ 1739 1740 /* Store the decided rate index values away */ 1741 bf->bf_state.bfs_pktlen = pktlen; 1742 bf->bf_state.bfs_hdrlen = hdrlen; 1743 bf->bf_state.bfs_atype = atype; 1744 bf->bf_state.bfs_txpower = params->ibp_power; 1745 bf->bf_state.bfs_txrate0 = txrate; 1746 bf->bf_state.bfs_try0 = try0; 1747 bf->bf_state.bfs_keyix = keyix; 1748 bf->bf_state.bfs_txantenna = txantenna; 1749 bf->bf_state.bfs_txflags = flags; 1750 bf->bf_state.bfs_shpream = 1751 !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); 1752 1753 /* XXX this should be done in ath_tx_setrate() */ 1754 bf->bf_state.bfs_ctsrate = 0; 1755 bf->bf_state.bfs_ctsduration = 0; 1756 bf->bf_state.bfs_ismrr = ismrr; 1757 1758 /* Blank the legacy rate array */ 1759 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1760 1761 bf->bf_state.bfs_rc[0].rix = 1762 ath_tx_findrix(sc, params->ibp_rate0); 1763 bf->bf_state.bfs_rc[0].tries = try0; 1764 bf->bf_state.bfs_rc[0].ratecode = txrate; 1765 1766 if (ismrr) { 1767 int rix; 1768 1769 rix = ath_tx_findrix(sc, params->ibp_rate1); 1770 bf->bf_state.bfs_rc[1].rix = rix; 1771 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 1772 1773 rix = ath_tx_findrix(sc, params->ibp_rate2); 1774 bf->bf_state.bfs_rc[2].rix = rix; 1775 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 1776 1777 rix = ath_tx_findrix(sc, params->ibp_rate3); 1778 bf->bf_state.bfs_rc[3].rix = rix; 1779 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 1780 } 1781 /* 1782 * All the required rate control decisions have been made; 1783 * fill in the rc flags. 1784 */ 1785 ath_tx_rate_fill_rcflags(sc, bf); 1786 1787 /* NB: no buffered multicast in power save support */ 1788 1789 /* XXX If it's an ADDBA, override the correct queue */ 1790 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 1791 1792 /* Map ADDBA to the correct priority */ 1793 if (do_override) { 1794 #if 0 1795 device_printf(sc->sc_dev, 1796 "%s: overriding tid %d pri %d -> %d\n", 1797 __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 1798 #endif 1799 pri = TID_TO_WME_AC(o_tid); 1800 } 1801 1802 /* 1803 * If we're overiding the ADDBA destination, dump directly 1804 * into the hardware queue, right after any pending 1805 * frames to that node are. 1806 */ 1807 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 1808 __func__, do_override); 1809 1810 if (do_override) { 1811 ATH_TXQ_LOCK(sc->sc_ac2q[pri]); 1812 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 1813 ATH_TXQ_UNLOCK(sc->sc_ac2q[pri]); 1814 } else { 1815 /* Queue to software queue */ 1816 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf); 1817 } 1818 1819 return 0; 1820 } 1821 1822 /* 1823 * Send a raw frame. 1824 * 1825 * This can be called by net80211. 1826 */ 1827 int 1828 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 1829 const struct ieee80211_bpf_params *params) 1830 { 1831 struct ieee80211com *ic = ni->ni_ic; 1832 struct ifnet *ifp = ic->ic_ifp; 1833 struct ath_softc *sc = ifp->if_softc; 1834 struct ath_buf *bf; 1835 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 1836 int error = 0; 1837 1838 ATH_PCU_LOCK(sc); 1839 if (sc->sc_inreset_cnt > 0) { 1840 device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; bailing\n", 1841 __func__); 1842 error = EIO; 1843 ATH_PCU_UNLOCK(sc); 1844 goto bad0; 1845 } 1846 sc->sc_txstart_cnt++; 1847 ATH_PCU_UNLOCK(sc); 1848 1849 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 1850 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 1851 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 1852 "!running" : "invalid"); 1853 m_freem(m); 1854 error = ENETDOWN; 1855 goto bad; 1856 } 1857 1858 /* 1859 * Enforce how deep the multicast queue can grow. 1860 * 1861 * XXX duplicated in ath_tx_start(). 
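	 * If the CAB queue is already deeper than sc_txq_mcastq_maxdepth
	 * the frame is dropped with ENOBUFS rather than being queued.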
1862 */ 1863 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1864 ATH_TXQ_LOCK(sc->sc_cabq); 1865 1866 if (sc->sc_cabq->axq_depth > sc->sc_txq_mcastq_maxdepth) { 1867 sc->sc_stats.ast_tx_mcastq_overflow++; 1868 error = ENOBUFS; 1869 } 1870 1871 ATH_TXQ_UNLOCK(sc->sc_cabq); 1872 1873 if (error != 0) { 1874 m_freem(m); 1875 goto bad; 1876 } 1877 } 1878 1879 /* 1880 * Grab a TX buffer and associated resources. 1881 */ 1882 bf = ath_getbuf(sc); 1883 if (bf == NULL) { 1884 sc->sc_stats.ast_tx_nobuf++; 1885 m_freem(m); 1886 error = ENOBUFS; 1887 goto bad; 1888 } 1889 1890 if (params == NULL) { 1891 /* 1892 * Legacy path; interpret frame contents to decide 1893 * precisely how to send the frame. 1894 */ 1895 if (ath_tx_start(sc, ni, bf, m)) { 1896 error = EIO; /* XXX */ 1897 goto bad2; 1898 } 1899 } else { 1900 /* 1901 * Caller supplied explicit parameters to use in 1902 * sending the frame. 1903 */ 1904 if (ath_tx_raw_start(sc, ni, bf, m, params)) { 1905 error = EIO; /* XXX */ 1906 goto bad2; 1907 } 1908 } 1909 sc->sc_wd_timer = 5; 1910 ifp->if_opackets++; 1911 sc->sc_stats.ast_tx_raw++; 1912 1913 ATH_PCU_LOCK(sc); 1914 sc->sc_txstart_cnt--; 1915 ATH_PCU_UNLOCK(sc); 1916 1917 return 0; 1918 bad2: 1919 ATH_TXBUF_LOCK(sc); 1920 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 1921 ATH_TXBUF_UNLOCK(sc); 1922 bad: 1923 ATH_PCU_LOCK(sc); 1924 sc->sc_txstart_cnt--; 1925 ATH_PCU_UNLOCK(sc); 1926 bad0: 1927 ifp->if_oerrors++; 1928 sc->sc_stats.ast_tx_raw_fail++; 1929 ieee80211_free_node(ni); 1930 1931 return error; 1932 } 1933 1934 /* Some helper functions */ 1935 1936 /* 1937 * ADDBA (and potentially others) need to be placed in the same 1938 * hardware queue as the TID/node it's relating to. This is so 1939 * it goes out after any pending non-aggregate frames to the 1940 * same node/TID. 1941 * 1942 * If this isn't done, the ADDBA can go out before the frames 1943 * queued in hardware. Even though these frames have a sequence 1944 * number -earlier- than the ADDBA can be transmitted (but 1945 * no frames whose sequence numbers are after the ADDBA should 1946 * be!) they'll arrive after the ADDBA - and the receiving end 1947 * will simply drop them as being out of the BAW. 1948 * 1949 * The frames can't be appended to the TID software queue - it'll 1950 * never be sent out. So these frames have to be directly 1951 * dispatched to the hardware, rather than queued in software. 1952 * So if this function returns true, the TXQ has to be 1953 * overridden and it has to be directly dispatched. 1954 * 1955 * It's a dirty hack, but someone's gotta do it. 1956 */ 1957 1958 /* 1959 * XXX doesn't belong here! 1960 */ 1961 static int 1962 ieee80211_is_action(struct ieee80211_frame *wh) 1963 { 1964 /* Type: Management frame? */ 1965 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 1966 IEEE80211_FC0_TYPE_MGT) 1967 return 0; 1968 1969 /* Subtype: Action frame? */ 1970 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 1971 IEEE80211_FC0_SUBTYPE_ACTION) 1972 return 0; 1973 1974 return 1; 1975 } 1976 1977 #define MS(_v, _f) (((_v) & _f) >> _f##_S) 1978 /* 1979 * Return an alternate TID for ADDBA request frames. 1980 * 1981 * Yes, this likely should be done in the net80211 layer. 1982 */ 1983 static int 1984 ath_tx_action_frame_override_queue(struct ath_softc *sc, 1985 struct ieee80211_node *ni, 1986 struct mbuf *m0, int *tid) 1987 { 1988 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 1989 struct ieee80211_action_ba_addbarequest *ia; 1990 uint8_t *frm; 1991 uint16_t baparamset; 1992 1993 /* Not action frame? 
Bail */ 1994 if (! ieee80211_is_action(wh)) 1995 return 0; 1996 1997 /* XXX Not needed for frames we send? */ 1998 #if 0 1999 /* Correct length? */ 2000 if (! ieee80211_parse_action(ni, m)) 2001 return 0; 2002 #endif 2003 2004 /* Extract out action frame */ 2005 frm = (u_int8_t *)&wh[1]; 2006 ia = (struct ieee80211_action_ba_addbarequest *) frm; 2007 2008 /* Not ADDBA? Bail */ 2009 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2010 return 0; 2011 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2012 return 0; 2013 2014 /* Extract TID, return it */ 2015 baparamset = le16toh(ia->rq_baparamset); 2016 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2017 2018 return 1; 2019 } 2020 #undef MS 2021 2022 /* Per-node software queue operations */ 2023 2024 /* 2025 * Add the current packet to the given BAW. 2026 * It is assumed that the current packet 2027 * 2028 * + fits inside the BAW; 2029 * + already has had a sequence number allocated. 2030 * 2031 * Since the BAW status may be modified by both the ath task and 2032 * the net80211/ifnet contexts, the TID must be locked. 2033 */ 2034 void 2035 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, 2036 struct ath_tid *tid, struct ath_buf *bf) 2037 { 2038 int index, cindex; 2039 struct ieee80211_tx_ampdu *tap; 2040 2041 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2042 2043 if (bf->bf_state.bfs_isretried) 2044 return; 2045 2046 /* 2047 * If this occurs we're in a lot of trouble. We should try to 2048 * recover from this without the session hanging? 2049 */ 2050 if (! bf->bf_state.bfs_seqno_assigned) { 2051 device_printf(sc->sc_dev, 2052 "%s: bf=%p, seqno_assigned is 0?!\n", __func__, bf); 2053 return; 2054 } 2055 2056 tap = ath_tx_get_tx_tid(an, tid->tid); 2057 2058 if (bf->bf_state.bfs_addedbaw) 2059 device_printf(sc->sc_dev, 2060 "%s: re-added? bf=%p, tid=%d, seqno %d; window %d:%d; " 2061 "baw head=%d tail=%d\n", 2062 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2063 tap->txa_start, tap->txa_wnd, tid->baw_head, 2064 tid->baw_tail); 2065 2066 /* 2067 * Verify that the given sequence number is not outside of the 2068 * BAW. Complain loudly if that's the case. 2069 */ 2070 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2071 SEQNO(bf->bf_state.bfs_seqno))) { 2072 device_printf(sc->sc_dev, 2073 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " 2074 "baw head=%d tail=%d\n", 2075 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2076 tap->txa_start, tap->txa_wnd, tid->baw_head, 2077 tid->baw_tail); 2078 2079 } 2080 2081 /* 2082 * ni->ni_txseqs[] is the currently allocated seqno. 2083 * the txa state contains the current baw start. 
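 *
 * As a worked example (assuming ATH_BA_INDEX() is the usual modulo-4096
 * distance from the BAW left edge and ATH_TID_MAX_BUFS is a power of two):
 * with txa_start = 4090 and a frame seqno of 3, index = (3 - 4090) & 4095
 * = 9; with baw_head = 5, the slot used is
 * cindex = (5 + 9) & (ATH_TID_MAX_BUFS - 1) = 14.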
2084 */ 2085 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); 2086 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2087 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2088 "%s: bf=%p, tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " 2089 "baw head=%d tail=%d\n", 2090 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), 2091 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, 2092 tid->baw_tail); 2093 2094
2095 #if 0 2096 assert(tid->tx_buf[cindex] == NULL); 2097 #endif 2098 if (tid->tx_buf[cindex] != NULL) { 2099 device_printf(sc->sc_dev, 2100 "%s: ba packet dup (index=%d, cindex=%d, " 2101 "head=%d, tail=%d)\n", 2102 __func__, index, cindex, tid->baw_head, tid->baw_tail); 2103 device_printf(sc->sc_dev, 2104 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", 2105 __func__, 2106 tid->tx_buf[cindex], 2107 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), 2108 bf, 2109 SEQNO(bf->bf_state.bfs_seqno) 2110 ); 2111 } 2112 tid->tx_buf[cindex] = bf; 2113
2114 if (index >= ((tid->baw_tail - tid->baw_head) & 2115 (ATH_TID_MAX_BUFS - 1))) { 2116 tid->baw_tail = cindex; 2117 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); 2118 } 2119 } 2120
2121 /* 2122 * Flip the BAW buffer entry over from the existing one to the new one. 2123 * 2124 * When software retransmitting a (sub-)frame, it is entirely possible that 2125 * the frame ath_buf is marked as BUSY and can't be immediately reused. 2126 * In that instance the buffer is cloned and the new buffer is used for 2127 * retransmit. We thus need to update the ath_buf slot in the BAW buf 2128 * tracking array to maintain consistency. 2129 */ 2130 static void 2131 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, 2132 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) 2133 { 2134 int index, cindex; 2135 struct ieee80211_tx_ampdu *tap; 2136 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); 2137
2138 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2139
2140 tap = ath_tx_get_tx_tid(an, tid->tid); 2141 index = ATH_BA_INDEX(tap->txa_start, seqno); 2142 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2143
2144 /* 2145 * Just warn for now; if it happens then we should find out 2146 * about it. It's highly likely the aggregation session will 2147 * soon hang. 2148 */ 2149 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { 2150 device_printf(sc->sc_dev, "%s: retransmitted buffer" 2151 " has mismatching seqnos, BA session may hang.\n", 2152 __func__); 2153 device_printf(sc->sc_dev, "%s: old seqno=%d, new_seqno=%d\n", 2154 __func__, 2155 old_bf->bf_state.bfs_seqno, 2156 new_bf->bf_state.bfs_seqno); 2157 } 2158
2159 if (tid->tx_buf[cindex] != old_bf) { 2160 device_printf(sc->sc_dev, "%s: ath_buf pointer incorrect; " 2161 "BA session may hang.\n", 2162 __func__); 2163 device_printf(sc->sc_dev, "%s: old bf=%p, new bf=%p\n", 2164 __func__, 2165 old_bf, new_bf); 2166 } 2167
2168 tid->tx_buf[cindex] = new_bf; 2169 } 2170
2171 /* 2172 * seq_start - left edge of BAW 2173 * seq_next - current/next sequence number to allocate 2174 * 2175 * Since the BAW status may be modified by both the ath task and 2176 * the net80211/ifnet contexts, the TID must be locked.
2177 */ 2178 static void 2179 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, 2180 struct ath_tid *tid, const struct ath_buf *bf) 2181 { 2182 int index, cindex; 2183 struct ieee80211_tx_ampdu *tap; 2184 int seqno = SEQNO(bf->bf_state.bfs_seqno); 2185 2186 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2187 2188 tap = ath_tx_get_tx_tid(an, tid->tid); 2189 index = ATH_BA_INDEX(tap->txa_start, seqno); 2190 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 2191 2192 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2193 "%s: bf=%p: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " 2194 "baw head=%d, tail=%d\n", 2195 __func__, bf, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, 2196 cindex, tid->baw_head, tid->baw_tail); 2197 2198 /* 2199 * If this occurs then we have a big problem - something else 2200 * has slid tap->txa_start along without updating the BAW 2201 * tracking start/end pointers. Thus the TX BAW state is now 2202 * completely busted. 2203 * 2204 * But for now, since I haven't yet fixed TDMA and buffer cloning, 2205 * it's quite possible that a cloned buffer is making its way 2206 * here and causing it to fire off. Disable TDMA for now. 2207 */ 2208 if (tid->tx_buf[cindex] != bf) { 2209 device_printf(sc->sc_dev, 2210 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", 2211 __func__, 2212 bf, SEQNO(bf->bf_state.bfs_seqno), 2213 tid->tx_buf[cindex], 2214 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno)); 2215 } 2216 2217 tid->tx_buf[cindex] = NULL; 2218 2219 while (tid->baw_head != tid->baw_tail && 2220 !tid->tx_buf[tid->baw_head]) { 2221 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 2222 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 2223 } 2224 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 2225 "%s: baw is now %d:%d, baw head=%d\n", 2226 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head); 2227 } 2228 2229 /* 2230 * Mark the current node/TID as ready to TX. 2231 * 2232 * This is done to make it easy for the software scheduler to 2233 * find which nodes have data to send. 2234 * 2235 * The TXQ lock must be held. 2236 */ 2237 static void 2238 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) 2239 { 2240 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2241 2242 ATH_TXQ_LOCK_ASSERT(txq); 2243 2244 if (tid->paused) 2245 return; /* paused, can't schedule yet */ 2246 2247 if (tid->sched) 2248 return; /* already scheduled */ 2249 2250 tid->sched = 1; 2251 2252 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); 2253 } 2254 2255 /* 2256 * Mark the current node as no longer needing to be polled for 2257 * TX packets. 2258 * 2259 * The TXQ lock must be held. 2260 */ 2261 static void 2262 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) 2263 { 2264 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2265 2266 ATH_TXQ_LOCK_ASSERT(txq); 2267 2268 if (tid->sched == 0) 2269 return; 2270 2271 tid->sched = 0; 2272 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); 2273 } 2274 2275 /* 2276 * Return whether a sequence number is actually required. 2277 * 2278 * A sequence number must only be allocated at the time that a frame 2279 * is considered for addition to the BAW/aggregate and being TXed. 2280 * The sequence number must not be allocated before the frame 2281 * is added to the BAW (protected by the same lock instance) 2282 * otherwise a the multi-entrant TX path may result in a later seqno 2283 * being added to the BAW first. The subsequent addition of the 2284 * earlier seqno would then not go into the BAW as it's now outside 2285 * of said BAW. 
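 *
 * As a concrete (hypothetical) example: thread A allocates seqno 100 but
 * is delayed before adding it to the BAW; thread B allocates 101, adds it
 * and transmits it. When 101 completes, ath_tx_update_baw() slides
 * txa_start past the still-empty slot for 100, so thread A's later
 * attempt to add 100 finds it behind the BAW left edge.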
2286 * 2287 * This routine is used by ath_tx_start() to mark whether the frame 2288 * should get a sequence number before adding it to the BAW. 2289 * 2290 * Then the actual aggregate TX routines will check whether this 2291 * flag is set and if the frame needs to go into the BAW, it'll 2292 * have a sequence number allocated for it. 2293 */ 2294 static int 2295 ath_tx_seqno_required(struct ath_softc *sc, struct ieee80211_node *ni, 2296 struct ath_buf *bf, struct mbuf *m0) 2297 { 2298 const struct ieee80211_frame *wh; 2299 uint8_t subtype; 2300 2301 wh = mtod(m0, const struct ieee80211_frame *); 2302 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2303 2304 /* XXX assert txq lock */ 2305 /* XXX assert ampdu is set */ 2306 2307 return ((IEEE80211_QOS_HAS_SEQ(wh) && 2308 subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)); 2309 } 2310 2311 /* 2312 * Assign a sequence number manually to the given frame. 2313 * 2314 * This should only be called for A-MPDU TX frames. 2315 * 2316 * If this is called after the initial frame setup, make sure you've flushed 2317 * the DMA map or you'll risk sending stale data to the NIC. This routine 2318 * updates the actual frame contents with the relevant seqno. 2319 */ 2320 int 2321 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, 2322 struct ath_buf *bf, struct mbuf *m0) 2323 { 2324 struct ieee80211_frame *wh; 2325 int tid, pri; 2326 ieee80211_seq seqno; 2327 uint8_t subtype; 2328 2329 /* TID lookup */ 2330 wh = mtod(m0, struct ieee80211_frame *); 2331 pri = M_WME_GETAC(m0); /* honor classification */ 2332 tid = WME_AC_TO_TID(pri); 2333 DPRINTF(sc, ATH_DEBUG_SW_TX, 2334 "%s: bf=%p, pri=%d, tid=%d, qos has seq=%d\n", 2335 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); 2336 2337 if (! bf->bf_state.bfs_need_seqno) { 2338 device_printf(sc->sc_dev, "%s: bf=%p: need_seqno not set?!\n", 2339 __func__, bf); 2340 return -1; 2341 } 2342 /* XXX check for bfs_need_seqno? */ 2343 if (bf->bf_state.bfs_seqno_assigned) { 2344 device_printf(sc->sc_dev, 2345 "%s: bf=%p: seqno already assigned (%d)?!\n", 2346 __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); 2347 return bf->bf_state.bfs_seqno >> IEEE80211_SEQ_SEQ_SHIFT; 2348 } 2349 2350 /* XXX Is it a control frame? Ignore */ 2351 2352 /* Does the packet require a sequence number? */ 2353 if (! IEEE80211_QOS_HAS_SEQ(wh)) 2354 return -1; 2355 2356 /* 2357 * Is it a QOS NULL Data frame? Give it a sequence number from 2358 * the default TID (IEEE80211_NONQOS_TID.) 2359 * 2360 * The RX path of everything I've looked at doesn't include the NULL 2361 * data frame sequence number in the aggregation state updates, so 2362 * assigning it a sequence number there will cause a BAW hole on the 2363 * RX side. 
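 *
 * As a worked example of the encoding done below (assuming the standard
 * 12-bit sequence space): seqno 2050 is stored as
 * htole16(2050 << IEEE80211_SEQ_SEQ_SHIFT) == htole16(0x8020), i.e. the
 * sequence number occupies bits 4..15 of the Sequence Control field and
 * the fragment number stays in bits 0..3.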
2364 */ 2365 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2366 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { 2367 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2368 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2369 } else { 2370 /* Manually assign sequence number */ 2371 seqno = ni->ni_txseqs[tid]; 2372 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2373 } 2374 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2375 M_SEQNO_SET(m0, seqno); 2376 bf->bf_state.bfs_seqno = seqno << IEEE80211_SEQ_SEQ_SHIFT; 2377 bf->bf_state.bfs_seqno_assigned = 1; 2378 2379 /* Return so caller can do something with it if needed */ 2380 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: -> seqno=%d\n", 2381 __func__, 2382 bf, 2383 seqno); 2384 return seqno; 2385 } 2386 2387 /* 2388 * Attempt to direct dispatch an aggregate frame to hardware. 2389 * If the frame is out of BAW, queue. 2390 * Otherwise, schedule it as a single frame. 2391 */ 2392 static void 2393 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_buf *bf) 2394 { 2395 struct ieee80211_node *ni = &an->an_node; 2396 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2397 struct ath_txq *txq = bf->bf_state.bfs_txq; 2398 struct ieee80211_tx_ampdu *tap; 2399 2400 ATH_TXQ_LOCK_ASSERT(txq); 2401 2402 tap = ath_tx_get_tx_tid(an, tid->tid); 2403 2404 /* paused? queue */ 2405 if (tid->paused) { 2406 ATH_TXQ_INSERT_TAIL(tid, bf, bf_list); 2407 /* XXX don't sched - we're paused! */ 2408 return; 2409 } 2410 2411 /* 2412 * TODO: If it's _before_ the BAW left edge, complain very loudly. 2413 * This means something (else) has slid the left edge along 2414 * before we got a chance to be TXed. 2415 */ 2416 2417 /* 2418 * Is there space in this BAW for another frame? 2419 * If not, don't bother trying to schedule it; just 2420 * throw it back on the queue. 2421 * 2422 * If we allocate the sequence number before we add 2423 * it to the BAW, we risk racing with another TX 2424 * thread that gets in a frame into the BAW with 2425 * seqno greater than ours. We'd then fail the 2426 * below check and throw the frame on the tail of 2427 * the queue. The sender would then have a hole. 2428 * 2429 * XXX again, we're protecting ni->ni_txseqs[tid] 2430 * behind this hardware TXQ lock, like the rest of 2431 * the TIDs that map to it. Ugh. 2432 */ 2433 if (bf->bf_state.bfs_dobaw) { 2434 ieee80211_seq seqno; 2435 2436 /* 2437 * If the sequence number is allocated, use it. 2438 * Otherwise, use the sequence number we WOULD 2439 * allocate. 2440 */ 2441 if (bf->bf_state.bfs_seqno_assigned) 2442 seqno = SEQNO(bf->bf_state.bfs_seqno); 2443 else 2444 seqno = ni->ni_txseqs[bf->bf_state.bfs_tid]; 2445 2446 /* 2447 * Check whether either the currently allocated 2448 * sequence number _OR_ the to-be allocated 2449 * sequence number is inside the BAW. 2450 */ 2451 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, seqno)) { 2452 ATH_TXQ_INSERT_TAIL(tid, bf, bf_list); 2453 ath_tx_tid_sched(sc, tid); 2454 return; 2455 } 2456 if (! bf->bf_state.bfs_seqno_assigned) { 2457 int seqno; 2458 2459 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, bf->bf_m); 2460 if (seqno < 0) { 2461 device_printf(sc->sc_dev, 2462 "%s: bf=%p, huh, seqno=-1?\n", 2463 __func__, 2464 bf); 2465 /* XXX what can we even do here? */ 2466 } 2467 /* Flush seqno update to RAM */ 2468 /* 2469 * XXX This is required because the dmasetup 2470 * XXX is done early rather than at dispatch 2471 * XXX time. Ew, we should fix this! 
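 *
 * (The CPU has just rewritten the 802.11 header in host memory; the
 * BUS_DMASYNC_PREWRITE below ensures that write is visible to the device
 * before the descriptor is handed to the hardware, so the NIC doesn't
 * DMA the stale, pre-seqno header.)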
2472 */ 2473 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2474 BUS_DMASYNC_PREWRITE); 2475 } 2476 } 2477
2478 /* outside baw? queue */ 2479 if (bf->bf_state.bfs_dobaw && 2480 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2481 SEQNO(bf->bf_state.bfs_seqno)))) { 2482 device_printf(sc->sc_dev, 2483 "%s: bf=%p, shouldn't be outside BAW now?!\n", 2484 __func__, 2485 bf); 2486 ATH_TXQ_INSERT_TAIL(tid, bf, bf_list); 2487 ath_tx_tid_sched(sc, tid); 2488 return; 2489 } 2490
2491 /* Direct dispatch to hardware */ 2492 ath_tx_do_ratelookup(sc, bf); 2493 ath_tx_calc_duration(sc, bf); 2494 ath_tx_calc_protection(sc, bf); 2495 ath_tx_set_rtscts(sc, bf); 2496 ath_tx_rate_fill_rcflags(sc, bf); 2497 ath_tx_setds(sc, bf); 2498 ath_tx_set_ratectrl(sc, bf->bf_node, bf); 2499 ath_tx_chaindesclist(sc, bf); 2500
2501 /* Statistics */ 2502 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 2503
2504 /* Track per-TID hardware queue depth correctly */ 2505 tid->hwq_depth++; 2506
2507 /* Add to BAW */ 2508 if (bf->bf_state.bfs_dobaw) { 2509 ath_tx_addto_baw(sc, an, tid, bf); 2510 bf->bf_state.bfs_addedbaw = 1; 2511 } 2512
2513 /* Set completion handler, multi-frame aggregate or not */ 2514 bf->bf_comp = ath_tx_aggr_comp; 2515
2516 /* Hand off to hardware */ 2517 ath_tx_handoff(sc, txq, bf); 2518 } 2519
2520 /* 2521 * Attempt to send the packet. 2522 * If the queue isn't busy, direct-dispatch. 2523 * If the queue is busy enough, queue the given packet on the 2524 * relevant software queue. 2525 */ 2526 void 2527 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, 2528 struct ath_buf *bf) 2529 { 2530 struct ath_node *an = ATH_NODE(ni); 2531 struct ieee80211_frame *wh; 2532 struct ath_tid *atid; 2533 int pri, tid; 2534 struct mbuf *m0 = bf->bf_m; 2535
2536 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ 2537 wh = mtod(m0, struct ieee80211_frame *); 2538 pri = ath_tx_getac(sc, m0); 2539 tid = ath_tx_gettid(sc, m0); 2540 atid = &an->an_tid[tid]; 2541
2542 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d, seqno=%d\n", 2543 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh), SEQNO(bf->bf_state.bfs_seqno)); 2544
2545 /* Set local packet state, used to queue packets to hardware */ 2546 bf->bf_state.bfs_tid = tid; 2547 bf->bf_state.bfs_txq = txq; 2548 bf->bf_state.bfs_pri = pri; 2549
2550 /* 2551 * If the TID is paused or the traffic is outside the BAW, software 2552 * queue it. 2553 * If the hardware queue isn't too busy, direct-dispatch the frame. 2554 * Otherwise, put it on the software queue and schedule the TID. 2555 */ 2556 ATH_TXQ_LOCK(txq); 2557 if (atid->paused) { 2558 /* TID is paused, queue */ 2559 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: paused\n", __func__, bf); 2560 ATH_TXQ_INSERT_TAIL(atid, bf, bf_list); 2561 } else if (ath_tx_ampdu_pending(sc, an, tid)) { 2562 /* AMPDU pending; queue */ 2563 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: pending\n", __func__, bf); 2564 ATH_TXQ_INSERT_TAIL(atid, bf, bf_list); 2565 /* XXX sched? 
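 *
 * NB: a rough summary of the decision made in this block:
 *	paused		-> software queue only (rescheduled on resume)
 *	ampdu pending	-> software queue (waiting on the ADDBA exchange)
 *	ampdu running	-> direct dispatch if the hardware queue is
 *			   shallow, else software queue + schedule
 *	otherwise	-> direct dispatch if shallow, else swq + schedule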
*/ 2566 } else if (ath_tx_ampdu_running(sc, an, tid)) { 2567 /* AMPDU running, attempt direct dispatch if possible */ 2568 if (txq->axq_depth < sc->sc_hwq_limit) { 2569 DPRINTF(sc, ATH_DEBUG_SW_TX, 2570 "%s: bf=%p: xmit_aggr\n", 2571 __func__, bf); 2572 ath_tx_xmit_aggr(sc, an, bf); 2573 } else { 2574 DPRINTF(sc, ATH_DEBUG_SW_TX, 2575 "%s: bf=%p: ampdu; swq'ing\n", 2576 __func__, bf); 2577 ATH_TXQ_INSERT_TAIL(atid, bf, bf_list); 2578 ath_tx_tid_sched(sc, atid); 2579 } 2580 } else if (txq->axq_depth < sc->sc_hwq_limit) { 2581 /* AMPDU not running, attempt direct dispatch */ 2582 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: xmit_normal\n", __func__, bf); 2583 ath_tx_xmit_normal(sc, txq, bf); 2584 } else { 2585 /* Busy; queue */ 2586 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: swq'ing\n", __func__, bf); 2587 ATH_TXQ_INSERT_TAIL(atid, bf, bf_list); 2588 ath_tx_tid_sched(sc, atid); 2589 } 2590 ATH_TXQ_UNLOCK(txq); 2591 } 2592 2593 /* 2594 * Do the basic frame setup stuff that's required before the frame 2595 * is added to a software queue. 2596 * 2597 * All frames get mostly the same treatment and it's done once. 2598 * Retransmits fiddle with things like the rate control setup, 2599 * setting the retransmit bit in the packet; doing relevant DMA/bus 2600 * syncing and relinking it (back) into the hardware TX queue. 2601 * 2602 * Note that this may cause the mbuf to be reallocated, so 2603 * m0 may not be valid. 2604 */ 2605 2606 2607 /* 2608 * Configure the per-TID node state. 2609 * 2610 * This likely belongs in if_ath_node.c but I can't think of anywhere 2611 * else to put it just yet. 2612 * 2613 * This sets up the SLISTs and the mutex as appropriate. 2614 */ 2615 void 2616 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 2617 { 2618 int i, j; 2619 struct ath_tid *atid; 2620 2621 for (i = 0; i < IEEE80211_TID_SIZE; i++) { 2622 atid = &an->an_tid[i]; 2623 TAILQ_INIT(&atid->axq_q); 2624 atid->tid = i; 2625 atid->an = an; 2626 for (j = 0; j < ATH_TID_MAX_BUFS; j++) 2627 atid->tx_buf[j] = NULL; 2628 atid->baw_head = atid->baw_tail = 0; 2629 atid->paused = 0; 2630 atid->sched = 0; 2631 atid->hwq_depth = 0; 2632 atid->cleanup_inprogress = 0; 2633 if (i == IEEE80211_NONQOS_TID) 2634 atid->ac = WME_AC_BE; 2635 else 2636 atid->ac = TID_TO_WME_AC(i); 2637 } 2638 } 2639 2640 /* 2641 * Pause the current TID. This stops packets from being transmitted 2642 * on it. 2643 * 2644 * Since this is also called from upper layers as well as the driver, 2645 * it will get the TID lock. 2646 */ 2647 static void 2648 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 2649 { 2650 2651 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2652 tid->paused++; 2653 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 2654 __func__, tid->paused); 2655 } 2656 2657 /* 2658 * Unpause the current TID, and schedule it if needed. 2659 */ 2660 static void 2661 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 2662 { 2663 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2664 2665 tid->paused--; 2666 2667 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 2668 __func__, tid->paused); 2669 2670 if (tid->paused || tid->axq_depth == 0) { 2671 return; 2672 } 2673 2674 ath_tx_tid_sched(sc, tid); 2675 /* Punt some frames to the hardware if needed */ 2676 //ath_txq_sched(sc, sc->sc_ac2q[tid->ac]); 2677 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 2678 } 2679 2680 /* 2681 * Suspend the queue because we need to TX a BAR. 
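 *
 * NB: tid->paused is a counter, not a flag, so every path that pauses a
 * TID (BAR handling, cleanup, and so on) must be matched by exactly one
 * resume; e.g. ath_tx_tid_bar_suspend() below pauses once no matter how
 * many frames failed, and ath_tx_tid_bar_unsuspend() performs the single
 * matching resume.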
2682 */ 2683 static void 2684 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 2685 { 2686 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2687 2688 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 2689 "%s: tid=%p, bar_wait=%d, bar_tx=%d, called\n", 2690 __func__, 2691 tid, 2692 tid->bar_wait, 2693 tid->bar_tx); 2694 2695 /* We shouldn't be called when bar_tx is 1 */ 2696 if (tid->bar_tx) { 2697 device_printf(sc->sc_dev, "%s: bar_tx is 1?!\n", 2698 __func__); 2699 } 2700 2701 /* If we've already been called, just be patient. */ 2702 if (tid->bar_wait) 2703 return; 2704 2705 /* Wait! */ 2706 tid->bar_wait = 1; 2707 2708 /* Only one pause, no matter how many frames fail */ 2709 ath_tx_tid_pause(sc, tid); 2710 } 2711 2712 /* 2713 * We've finished with BAR handling - either we succeeded or 2714 * failed. Either way, unsuspend TX. 2715 */ 2716 static void 2717 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 2718 { 2719 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2720 2721 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 2722 "%s: tid=%p, called\n", 2723 __func__, 2724 tid); 2725 2726 if (tid->bar_tx == 0 || tid->bar_wait == 0) { 2727 device_printf(sc->sc_dev, "%s: bar_tx=%d, bar_wait=%d: ?\n", 2728 __func__, tid->bar_tx, tid->bar_wait); 2729 } 2730 2731 tid->bar_tx = tid->bar_wait = 0; 2732 ath_tx_tid_resume(sc, tid); 2733 } 2734 2735 /* 2736 * Return whether we're ready to TX a BAR frame. 2737 * 2738 * Requires the TID lock be held. 2739 */ 2740 static int 2741 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 2742 { 2743 2744 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2745 2746 if (tid->bar_wait == 0 || tid->hwq_depth > 0) 2747 return (0); 2748 2749 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%p (%d), bar ready\n", 2750 __func__, tid, tid->tid); 2751 2752 return (1); 2753 } 2754 2755 /* 2756 * Check whether the current TID is ready to have a BAR 2757 * TXed and if so, do the TX. 2758 * 2759 * Since the TID/TXQ lock can't be held during a call to 2760 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 2761 * sending the BAR and locking it again. 2762 * 2763 * Eventually, the code to send the BAR should be broken out 2764 * from this routine so the lock doesn't have to be reacquired 2765 * just to be immediately dropped by the caller. 2766 */ 2767 static void 2768 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 2769 { 2770 struct ieee80211_tx_ampdu *tap; 2771 2772 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2773 2774 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 2775 "%s: tid=%p, called\n", 2776 __func__, 2777 tid); 2778 2779 tap = ath_tx_get_tx_tid(tid->an, tid->tid); 2780 2781 /* 2782 * This is an error condition! 2783 */ 2784 if (tid->bar_wait == 0 || tid->bar_tx == 1) { 2785 device_printf(sc->sc_dev, 2786 "%s: tid=%p, bar_tx=%d, bar_wait=%d: ?\n", 2787 __func__, 2788 tid, 2789 tid->bar_tx, 2790 tid->bar_wait); 2791 return; 2792 } 2793 2794 /* Don't do anything if we still have pending frames */ 2795 if (tid->hwq_depth > 0) { 2796 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 2797 "%s: tid=%p, hwq_depth=%d, waiting\n", 2798 __func__, 2799 tid, 2800 tid->hwq_depth); 2801 return; 2802 } 2803 2804 /* We're now about to TX */ 2805 tid->bar_tx = 1; 2806 2807 /* 2808 * Calculate new BAW left edge, now that all frames have either 2809 * succeeded or failed. 2810 * 2811 * XXX verify this is _actually_ the valid value to begin at! 
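 *
 * (The BAR sent below carries tap->txa_start as the new starting
 * sequence number; on receipt the peer moves its reorder window forward
 * and flushes anything it was buffering below that point, so subsequent
 * (re)transmissions starting at txa_start land inside the receiver's
 * window.)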
2812 */ 2813 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 2814 "%s: tid=%p, new BAW left edge=%d\n", 2815 __func__, 2816 tid, 2817 tap->txa_start); 2818 2819 /* Try sending the BAR frame */ 2820 /* We can't hold the lock here! */ 2821 2822 ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]); 2823 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 2824 /* Success? Now we wait for notification that it's done */ 2825 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]); 2826 return; 2827 } 2828 2829 /* Failure? For now, warn loudly and continue */ 2830 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]); 2831 device_printf(sc->sc_dev, "%s: tid=%p, failed to TX BAR, continue!\n", 2832 __func__, tid); 2833 ath_tx_tid_bar_unsuspend(sc, tid); 2834 } 2835 2836 2837 /* 2838 * Free any packets currently pending in the software TX queue. 2839 * 2840 * This will be called when a node is being deleted. 2841 * 2842 * It can also be called on an active node during an interface 2843 * reset or state transition. 2844 * 2845 * (From Linux/reference): 2846 * 2847 * TODO: For frame(s) that are in the retry state, we will reuse the 2848 * sequence number(s) without setting the retry bit. The 2849 * alternative is to give up on these and BAR the receiver's window 2850 * forward. 2851 */ 2852 static void 2853 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, 2854 struct ath_tid *tid, ath_bufhead *bf_cq) 2855 { 2856 struct ath_buf *bf; 2857 struct ieee80211_tx_ampdu *tap; 2858 struct ieee80211_node *ni = &an->an_node; 2859 int t = 0; 2860 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 2861 2862 tap = ath_tx_get_tx_tid(an, tid->tid); 2863 2864 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[tid->ac]); 2865 2866 /* Walk the queue, free frames */ 2867 for (;;) { 2868 bf = TAILQ_FIRST(&tid->axq_q); 2869 if (bf == NULL) { 2870 break; 2871 } 2872 2873 if (t == 0) { 2874 device_printf(sc->sc_dev, 2875 "%s: node %p: bf=%p: addbaw=%d, dobaw=%d, " 2876 "seqno_assign=%d, seqno_required=%d, seqno=%d, retry=%d\n", 2877 __func__, ni, bf, 2878 bf->bf_state.bfs_addedbaw, 2879 bf->bf_state.bfs_dobaw, 2880 bf->bf_state.bfs_need_seqno, 2881 bf->bf_state.bfs_seqno_assigned, 2882 SEQNO(bf->bf_state.bfs_seqno), 2883 bf->bf_state.bfs_retries); 2884 device_printf(sc->sc_dev, 2885 "%s: node %p: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d\n", 2886 __func__, ni, bf, 2887 tid->axq_depth, 2888 tid->hwq_depth, 2889 tid->bar_wait); 2890 device_printf(sc->sc_dev, 2891 "%s: node %p: bf=%p: tid %d: txq_depth=%d, " 2892 "txq_aggr_depth=%d, sched=%d, paused=%d, " 2893 "hwq_depth=%d, incomp=%d, baw_head=%d, " 2894 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 2895 __func__, ni, bf, tid->tid, txq->axq_depth, 2896 txq->axq_aggr_depth, tid->sched, tid->paused, 2897 tid->hwq_depth, tid->incomp, tid->baw_head, 2898 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 2899 ni->ni_txseqs[tid->tid]); 2900 2901 /* XXX Dump the frame, see what it is? */ 2902 ieee80211_dump_pkt(ni->ni_ic, 2903 mtod(bf->bf_m, const uint8_t *), 2904 bf->bf_m->m_len, 0, -1); 2905 2906 t = 1; 2907 } 2908 2909 2910 /* 2911 * If the current TID is running AMPDU, update 2912 * the BAW. 2913 */ 2914 if (ath_tx_ampdu_running(sc, an, tid->tid) && 2915 bf->bf_state.bfs_dobaw) { 2916 /* 2917 * Only remove the frame from the BAW if it's 2918 * been transmitted at least once; this means 2919 * the frame was in the BAW to begin with. 2920 */ 2921 if (bf->bf_state.bfs_retries > 0) { 2922 ath_tx_update_baw(sc, an, tid, bf); 2923 bf->bf_state.bfs_dobaw = 0; 2924 } 2925 /* 2926 * This has become a non-fatal error now 2927 */ 2928 if (! 
bf->bf_state.bfs_addedbaw) 2929 device_printf(sc->sc_dev, 2930 "%s: wasn't added: seqno %d\n", 2931 __func__, SEQNO(bf->bf_state.bfs_seqno)); 2932 } 2933 ATH_TXQ_REMOVE(tid, bf, bf_list); 2934 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 2935 } 2936 2937 /* 2938 * Now that it's completed, grab the TID lock and update 2939 * the sequence number and BAW window. 2940 * Because sequence numbers have been assigned to frames 2941 * that haven't been sent yet, it's entirely possible 2942 * we'll be called with some pending frames that have not 2943 * been transmitted. 2944 * 2945 * The cleaner solution is to do the sequence number allocation 2946 * when the packet is first transmitted - and thus the "retries" 2947 * check above would be enough to update the BAW/seqno. 2948 */ 2949 2950 /* But don't do it for non-QoS TIDs */ 2951 if (tap) { 2952 #if 0 2953 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 2954 "%s: node %p: TID %d: sliding BAW left edge to %d\n", 2955 __func__, an, tid->tid, tap->txa_start); 2956 #endif 2957 ni->ni_txseqs[tid->tid] = tap->txa_start; 2958 tid->baw_tail = tid->baw_head; 2959 } 2960 } 2961 2962 /* 2963 * Flush all software queued packets for the given node. 2964 * 2965 * This occurs when a completion handler frees the last buffer 2966 * for a node, and the node is thus freed. This causes the node 2967 * to be cleaned up, which ends up calling ath_tx_node_flush. 2968 */ 2969 void 2970 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 2971 { 2972 int tid; 2973 ath_bufhead bf_cq; 2974 struct ath_buf *bf; 2975 2976 TAILQ_INIT(&bf_cq); 2977 2978 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 2979 struct ath_tid *atid = &an->an_tid[tid]; 2980 struct ath_txq *txq = sc->sc_ac2q[atid->ac]; 2981 2982 /* Remove this tid from the list of active tids */ 2983 ATH_TXQ_LOCK(txq); 2984 ath_tx_tid_unsched(sc, atid); 2985 2986 /* Free packets */ 2987 ath_tx_tid_drain(sc, an, atid, &bf_cq); 2988 ATH_TXQ_UNLOCK(txq); 2989 } 2990 2991 /* Handle completed frames */ 2992 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 2993 TAILQ_REMOVE(&bf_cq, bf, bf_list); 2994 ath_tx_default_comp(sc, bf, 0); 2995 } 2996 } 2997 2998 /* 2999 * Drain all the software TXQs currently with traffic queued. 3000 */ 3001 void 3002 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 3003 { 3004 struct ath_tid *tid; 3005 ath_bufhead bf_cq; 3006 struct ath_buf *bf; 3007 3008 TAILQ_INIT(&bf_cq); 3009 ATH_TXQ_LOCK(txq); 3010 3011 /* 3012 * Iterate over all active tids for the given txq, 3013 * flushing and unsched'ing them 3014 */ 3015 while (! TAILQ_EMPTY(&txq->axq_tidq)) { 3016 tid = TAILQ_FIRST(&txq->axq_tidq); 3017 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 3018 ath_tx_tid_unsched(sc, tid); 3019 } 3020 3021 ATH_TXQ_UNLOCK(txq); 3022 3023 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3024 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3025 ath_tx_default_comp(sc, bf, 0); 3026 } 3027 } 3028 3029 /* 3030 * Handle completion of non-aggregate session frames. 
3031 */ 3032 void 3033 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3034 { 3035 struct ieee80211_node *ni = bf->bf_node; 3036 struct ath_node *an = ATH_NODE(ni); 3037 int tid = bf->bf_state.bfs_tid; 3038 struct ath_tid *atid = &an->an_tid[tid]; 3039 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3040 3041 /* The TID state is protected behind the TXQ lock */ 3042 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3043 3044 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", 3045 __func__, bf, fail, atid->hwq_depth - 1); 3046 3047 atid->hwq_depth--; 3048 if (atid->hwq_depth < 0) 3049 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3050 __func__, atid->hwq_depth); 3051 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3052 3053 /* 3054 * punt to rate control if we're not being cleaned up 3055 * during a hw queue drain and the frame wanted an ACK. 3056 */ 3057 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3058 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3059 ts, bf->bf_state.bfs_pktlen, 3060 1, (ts->ts_status == 0) ? 0 : 1); 3061 3062 ath_tx_default_comp(sc, bf, fail); 3063 } 3064 3065 /* 3066 * Handle cleanup of aggregate session packets that aren't 3067 * an A-MPDU. 3068 * 3069 * There's no need to update the BAW here - the session is being 3070 * torn down. 3071 */ 3072 static void 3073 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3074 { 3075 struct ieee80211_node *ni = bf->bf_node; 3076 struct ath_node *an = ATH_NODE(ni); 3077 int tid = bf->bf_state.bfs_tid; 3078 struct ath_tid *atid = &an->an_tid[tid]; 3079 3080 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", 3081 __func__, tid, atid->incomp); 3082 3083 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3084 atid->incomp--; 3085 if (atid->incomp == 0) { 3086 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3087 "%s: TID %d: cleaned up! resume!\n", 3088 __func__, tid); 3089 atid->cleanup_inprogress = 0; 3090 ath_tx_tid_resume(sc, atid); 3091 } 3092 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3093 3094 ath_tx_default_comp(sc, bf, 0); 3095 } 3096 3097 /* 3098 * Performs transmit side cleanup when TID changes from aggregated to 3099 * unaggregated. 3100 * 3101 * - Discard all retry frames from the s/w queue. 3102 * - Fix the tx completion function for all buffers in s/w queue. 3103 * - Count the number of unacked frames, and let transmit completion 3104 * handle it later. 3105 * 3106 * The caller is responsible for pausing the TID. 3107 */ 3108 static void 3109 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid) 3110 { 3111 struct ath_tid *atid = &an->an_tid[tid]; 3112 struct ieee80211_tx_ampdu *tap; 3113 struct ath_buf *bf, *bf_next; 3114 ath_bufhead bf_cq; 3115 3116 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, 3117 "%s: TID %d: called\n", __func__, tid); 3118 3119 TAILQ_INIT(&bf_cq); 3120 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3121 3122 /* 3123 * Update the frames in the software TX queue: 3124 * 3125 * + Discard retry frames in the queue 3126 * + Fix the completion function to be non-aggregate 3127 */ 3128 bf = TAILQ_FIRST(&atid->axq_q); 3129 while (bf) { 3130 if (bf->bf_state.bfs_isretried) { 3131 bf_next = TAILQ_NEXT(bf, bf_list); 3132 TAILQ_REMOVE(&atid->axq_q, bf, bf_list); 3133 atid->axq_depth--; 3134 if (bf->bf_state.bfs_dobaw) { 3135 ath_tx_update_baw(sc, an, atid, bf); 3136 if (! 
bf->bf_state.bfs_addedbaw) 3137 device_printf(sc->sc_dev, 3138 "%s: wasn't added: seqno %d\n", 3139 __func__, 3140 SEQNO(bf->bf_state.bfs_seqno)); 3141 } 3142 bf->bf_state.bfs_dobaw = 0; 3143 /* 3144 * Call the default completion handler with "fail" just 3145 * so upper levels are suitably notified about this. 3146 */ 3147 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3148 bf = bf_next; 3149 continue; 3150 } 3151 /* Give these the default completion handler */ 3152 bf->bf_comp = ath_tx_normal_comp; 3153 bf = TAILQ_NEXT(bf, bf_list); 3154 } 3155 3156 /* The caller is required to pause the TID */ 3157 #if 0 3158 /* Pause the TID */ 3159 ath_tx_tid_pause(sc, atid); 3160 #endif 3161 3162 /* 3163 * Calculate what hardware-queued frames exist based 3164 * on the current BAW size. Ie, what frames have been 3165 * added to the TX hardware queue for this TID but 3166 * not yet ACKed. 3167 */ 3168 tap = ath_tx_get_tx_tid(an, tid); 3169 /* Need the lock - fiddling with BAW */ 3170 while (atid->baw_head != atid->baw_tail) { 3171 if (atid->tx_buf[atid->baw_head]) { 3172 atid->incomp++; 3173 atid->cleanup_inprogress = 1; 3174 atid->tx_buf[atid->baw_head] = NULL; 3175 } 3176 INCR(atid->baw_head, ATH_TID_MAX_BUFS); 3177 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); 3178 } 3179 3180 /* 3181 * If cleanup is required, defer TID scheduling 3182 * until all the HW queued packets have been 3183 * sent. 3184 */ 3185 if (! atid->cleanup_inprogress) 3186 ath_tx_tid_resume(sc, atid); 3187 3188 if (atid->cleanup_inprogress) 3189 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3190 "%s: TID %d: cleanup needed: %d packets\n", 3191 __func__, tid, atid->incomp); 3192 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3193 3194 /* Handle completing frames and fail them */ 3195 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3196 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3197 ath_tx_default_comp(sc, bf, 1); 3198 } 3199 } 3200 3201 static void 3202 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) 3203 { 3204 struct ieee80211_frame *wh; 3205 3206 wh = mtod(bf->bf_m, struct ieee80211_frame *); 3207 /* Only update/resync if needed */ 3208 if (bf->bf_state.bfs_isretried == 0) { 3209 wh->i_fc[1] |= IEEE80211_FC1_RETRY; 3210 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3211 BUS_DMASYNC_PREWRITE); 3212 } 3213 sc->sc_stats.ast_tx_swretries++; 3214 bf->bf_state.bfs_isretried = 1; 3215 bf->bf_state.bfs_retries ++; 3216 } 3217 3218 static struct ath_buf * 3219 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 3220 struct ath_tid *tid, struct ath_buf *bf) 3221 { 3222 struct ath_buf *nbf; 3223 int error; 3224 3225 nbf = ath_buf_clone(sc, bf); 3226 3227 #if 0 3228 device_printf(sc->sc_dev, "%s: ATH_BUF_BUSY; cloning\n", 3229 __func__); 3230 #endif 3231 3232 if (nbf == NULL) { 3233 /* Failed to clone */ 3234 device_printf(sc->sc_dev, 3235 "%s: failed to clone a busy buffer\n", 3236 __func__); 3237 return NULL; 3238 } 3239 3240 /* Setup the dma for the new buffer */ 3241 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); 3242 if (error != 0) { 3243 device_printf(sc->sc_dev, 3244 "%s: failed to setup dma for clone\n", 3245 __func__); 3246 /* 3247 * Put this at the head of the list, not tail; 3248 * that way it doesn't interfere with the 3249 * busy buffer logic (which uses the tail of 3250 * the list.) 
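 *
 * (Roughly: free buffers are taken from the head of sc_txbuf, while
 * recently-freed buffers whose descriptors the hardware may still
 * reference sit at the tail; this clone never reached the hardware, so
 * returning it to the head keeps it clear of that logic.)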
3251 */ 3252 ATH_TXBUF_LOCK(sc); 3253 TAILQ_INSERT_HEAD(&sc->sc_txbuf, nbf, bf_list); 3254 ATH_TXBUF_UNLOCK(sc); 3255 return NULL; 3256 } 3257 3258 /* Update BAW if required, before we free the original buf */ 3259 if (bf->bf_state.bfs_dobaw) 3260 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); 3261 3262 /* Free current buffer; return the older buffer */ 3263 bf->bf_m = NULL; 3264 bf->bf_node = NULL; 3265 ath_freebuf(sc, bf); 3266 return nbf; 3267 } 3268 3269 /* 3270 * Handle retrying an unaggregate frame in an aggregate 3271 * session. 3272 * 3273 * If too many retries occur, pause the TID, wait for 3274 * any further retransmits (as there's no reason why 3275 * non-aggregate frames in an aggregate session are 3276 * transmitted in-order; they just have to be in-BAW) 3277 * and then queue a BAR. 3278 */ 3279 static void 3280 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) 3281 { 3282 struct ieee80211_node *ni = bf->bf_node; 3283 struct ath_node *an = ATH_NODE(ni); 3284 int tid = bf->bf_state.bfs_tid; 3285 struct ath_tid *atid = &an->an_tid[tid]; 3286 struct ieee80211_tx_ampdu *tap; 3287 3288 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3289 3290 tap = ath_tx_get_tx_tid(an, tid); 3291 3292 /* 3293 * If the buffer is marked as busy, we can't directly 3294 * reuse it. Instead, try to clone the buffer. 3295 * If the clone is successful, recycle the old buffer. 3296 * If the clone is unsuccessful, set bfs_retries to max 3297 * to force the next bit of code to free the buffer 3298 * for us. 3299 */ 3300 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 3301 (bf->bf_flags & ATH_BUF_BUSY)) { 3302 struct ath_buf *nbf; 3303 nbf = ath_tx_retry_clone(sc, an, atid, bf); 3304 if (nbf) 3305 /* bf has been freed at this point */ 3306 bf = nbf; 3307 else 3308 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 3309 } 3310 3311 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 3312 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 3313 "%s: exceeded retries; seqno %d\n", 3314 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3315 sc->sc_stats.ast_tx_swretrymax++; 3316 3317 /* Update BAW anyway */ 3318 if (bf->bf_state.bfs_dobaw) { 3319 ath_tx_update_baw(sc, an, atid, bf); 3320 if (! bf->bf_state.bfs_addedbaw) 3321 device_printf(sc->sc_dev, 3322 "%s: wasn't added: seqno %d\n", 3323 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3324 } 3325 bf->bf_state.bfs_dobaw = 0; 3326 3327 /* Suspend the TX queue and get ready to send the BAR */ 3328 ath_tx_tid_bar_suspend(sc, atid); 3329 3330 /* Send the BAR if there are no other frames waiting */ 3331 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3332 ath_tx_tid_bar_tx(sc, atid); 3333 3334 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3335 3336 /* Free buffer, bf is free after this call */ 3337 ath_tx_default_comp(sc, bf, 0); 3338 return; 3339 } 3340 3341 /* 3342 * This increments the retry counter as well as 3343 * sets the retry flag in the ath_buf and packet 3344 * body. 3345 */ 3346 ath_tx_set_retry(sc, bf); 3347 3348 /* 3349 * Insert this at the head of the queue, so it's 3350 * retried before any current/subsequent frames. 3351 */ 3352 ATH_TXQ_INSERT_HEAD(atid, bf, bf_list); 3353 ath_tx_tid_sched(sc, atid); 3354 /* Send the BAR if there are no other frames waiting */ 3355 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3356 ath_tx_tid_bar_tx(sc, atid); 3357 3358 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3359 } 3360 3361 /* 3362 * Common code for aggregate excessive retry/subframe retry. 3363 * If retrying, queues buffers to bf_q. If not, frees the 3364 * buffers. 
3365 * 3366 * XXX should unify this with ath_tx_aggr_retry_unaggr() 3367 */ 3368 static int 3369 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, 3370 ath_bufhead *bf_q) 3371 { 3372 struct ieee80211_node *ni = bf->bf_node; 3373 struct ath_node *an = ATH_NODE(ni); 3374 int tid = bf->bf_state.bfs_tid; 3375 struct ath_tid *atid = &an->an_tid[tid]; 3376 3377 ATH_TXQ_LOCK_ASSERT(sc->sc_ac2q[atid->ac]); 3378 3379 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 3380 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); 3381 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ 3382 3383 /* 3384 * If the buffer is marked as busy, we can't directly 3385 * reuse it. Instead, try to clone the buffer. 3386 * If the clone is successful, recycle the old buffer. 3387 * If the clone is unsuccessful, set bfs_retries to max 3388 * to force the next bit of code to free the buffer 3389 * for us. 3390 */ 3391 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && 3392 (bf->bf_flags & ATH_BUF_BUSY)) { 3393 struct ath_buf *nbf; 3394 nbf = ath_tx_retry_clone(sc, an, atid, bf); 3395 if (nbf) 3396 /* bf has been freed at this point */ 3397 bf = nbf; 3398 else 3399 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; 3400 } 3401 3402 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { 3403 sc->sc_stats.ast_tx_swretrymax++; 3404 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, 3405 "%s: max retries: seqno %d\n", 3406 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3407 ath_tx_update_baw(sc, an, atid, bf); 3408 if (! bf->bf_state.bfs_addedbaw) 3409 device_printf(sc->sc_dev, 3410 "%s: wasn't added: seqno %d\n", 3411 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3412 bf->bf_state.bfs_dobaw = 0; 3413 return 1; 3414 } 3415 3416 ath_tx_set_retry(sc, bf); 3417 bf->bf_next = NULL; /* Just to make sure */ 3418 3419 TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3420 return 0; 3421 } 3422 3423 /* 3424 * error pkt completion for an aggregate destination 3425 */ 3426 static void 3427 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, 3428 struct ath_tid *tid) 3429 { 3430 struct ieee80211_node *ni = bf_first->bf_node; 3431 struct ath_node *an = ATH_NODE(ni); 3432 struct ath_buf *bf_next, *bf; 3433 ath_bufhead bf_q; 3434 int drops = 0; 3435 struct ieee80211_tx_ampdu *tap; 3436 ath_bufhead bf_cq; 3437 3438 TAILQ_INIT(&bf_q); 3439 TAILQ_INIT(&bf_cq); 3440 3441 /* 3442 * Update rate control - all frames have failed. 3443 * 3444 * XXX use the length in the first frame in the series; 3445 * XXX just so things are consistent for now. 3446 */ 3447 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, 3448 &bf_first->bf_status.ds_txstat, 3449 bf_first->bf_state.bfs_pktlen, 3450 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); 3451 3452 ATH_TXQ_LOCK(sc->sc_ac2q[tid->ac]); 3453 tap = ath_tx_get_tx_tid(an, tid->tid); 3454 sc->sc_stats.ast_tx_aggr_failall++; 3455 3456 /* Retry all subframes */ 3457 bf = bf_first; 3458 while (bf) { 3459 bf_next = bf->bf_next; 3460 bf->bf_next = NULL; /* Remove it from the aggr list */ 3461 sc->sc_stats.ast_tx_aggr_fail++; 3462 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 3463 drops++; 3464 bf->bf_next = NULL; 3465 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3466 } 3467 bf = bf_next; 3468 } 3469 3470 /* Prepend all frames to the beginning of the queue */ 3471 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 3472 TAILQ_REMOVE(&bf_q, bf, bf_list); 3473 ATH_TXQ_INSERT_HEAD(tid, bf, bf_list); 3474 } 3475 3476 /* 3477 * Schedule the TID to be re-tried. 
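 *
 * (The TAILQ_LAST() walk above re-inserts the retry frames at the head
 * in reverse order, which leaves them at the front of the software queue
 * in their original sequence order, ahead of any frames that were
 * already queued.)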
3478 */ 3479 ath_tx_tid_sched(sc, tid); 3480 3481 /* 3482 * send bar if we dropped any frames 3483 * 3484 * Keep the txq lock held for now, as we need to ensure 3485 * that ni_txseqs[] is consistent (as it's being updated 3486 * in the ifnet TX context or raw TX context.) 3487 */ 3488 if (drops) { 3489 /* Suspend the TX queue and get ready to send the BAR */ 3490 ath_tx_tid_bar_suspend(sc, tid); 3491 } 3492 3493 /* 3494 * Send BAR if required 3495 */ 3496 if (ath_tx_tid_bar_tx_ready(sc, tid)) 3497 ath_tx_tid_bar_tx(sc, tid); 3498 ATH_TXQ_UNLOCK(sc->sc_ac2q[tid->ac]); 3499 3500 /* Complete frames which errored out */ 3501 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3502 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3503 ath_tx_default_comp(sc, bf, 0); 3504 } 3505 } 3506 3507 /* 3508 * Handle clean-up of packets from an aggregate list. 3509 * 3510 * There's no need to update the BAW here - the session is being 3511 * torn down. 3512 */ 3513 static void 3514 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) 3515 { 3516 struct ath_buf *bf, *bf_next; 3517 struct ieee80211_node *ni = bf_first->bf_node; 3518 struct ath_node *an = ATH_NODE(ni); 3519 int tid = bf_first->bf_state.bfs_tid; 3520 struct ath_tid *atid = &an->an_tid[tid]; 3521 3522 bf = bf_first; 3523 3524 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3525 3526 /* update incomp */ 3527 while (bf) { 3528 atid->incomp--; 3529 bf = bf->bf_next; 3530 } 3531 3532 if (atid->incomp == 0) { 3533 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3534 "%s: TID %d: cleaned up! resume!\n", 3535 __func__, tid); 3536 atid->cleanup_inprogress = 0; 3537 ath_tx_tid_resume(sc, atid); 3538 } 3539 3540 /* Send BAR if required */ 3541 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3542 ath_tx_tid_bar_tx(sc, atid); 3543 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3544 3545 /* Handle frame completion */ 3546 while (bf) { 3547 bf_next = bf->bf_next; 3548 ath_tx_default_comp(sc, bf, 1); 3549 bf = bf_next; 3550 } 3551 } 3552 3553 /* 3554 * Handle completion of an set of aggregate frames. 3555 * 3556 * XXX for now, simply complete each sub-frame. 3557 * 3558 * Note: the completion handler is the last descriptor in the aggregate, 3559 * not the last descriptor in the first frame. 3560 */ 3561 static void 3562 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, 3563 int fail) 3564 { 3565 //struct ath_desc *ds = bf->bf_lastds; 3566 struct ieee80211_node *ni = bf_first->bf_node; 3567 struct ath_node *an = ATH_NODE(ni); 3568 int tid = bf_first->bf_state.bfs_tid; 3569 struct ath_tid *atid = &an->an_tid[tid]; 3570 struct ath_tx_status ts; 3571 struct ieee80211_tx_ampdu *tap; 3572 ath_bufhead bf_q; 3573 ath_bufhead bf_cq; 3574 int seq_st, tx_ok; 3575 int hasba, isaggr; 3576 uint32_t ba[2]; 3577 struct ath_buf *bf, *bf_next; 3578 int ba_index; 3579 int drops = 0; 3580 int nframes = 0, nbad = 0, nf; 3581 int pktlen; 3582 /* XXX there's too much on the stack? 
*/ 3583 struct ath_rc_series rc[ATH_RC_NUM]; 3584 int txseq; 3585 3586 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", 3587 __func__, atid->hwq_depth); 3588 3589 /* The TID state is kept behind the TXQ lock */ 3590 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3591 3592 atid->hwq_depth--; 3593 if (atid->hwq_depth < 0) 3594 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3595 __func__, atid->hwq_depth); 3596 3597 /* 3598 * Punt cleanup to the relevant function, not our problem now 3599 */ 3600 if (atid->cleanup_inprogress) { 3601 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3602 ath_tx_comp_cleanup_aggr(sc, bf_first); 3603 return; 3604 } 3605 3606 /* 3607 * Take a copy; this may be needed -after- bf_first 3608 * has been completed and freed. 3609 */ 3610 ts = bf_first->bf_status.ds_txstat; 3611 /* 3612 * XXX for now, use the first frame in the aggregate for 3613 * XXX rate control completion; it's at least consistent. 3614 */ 3615 pktlen = bf_first->bf_state.bfs_pktlen; 3616 3617 /* 3618 * Handle errors first! 3619 * 3620 * Here, handle _any_ error as a "exceeded retries" error. 3621 * Later on (when filtered frames are to be specially handled) 3622 * it'll have to be expanded. 3623 */ 3624 #if 0 3625 if (ts.ts_status & HAL_TXERR_XRETRY) { 3626 #endif 3627 if (ts.ts_status != 0) { 3628 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3629 ath_tx_comp_aggr_error(sc, bf_first, atid); 3630 return; 3631 } 3632 3633 TAILQ_INIT(&bf_q); 3634 TAILQ_INIT(&bf_cq); 3635 tap = ath_tx_get_tx_tid(an, tid); 3636 3637 /* 3638 * extract starting sequence and block-ack bitmap 3639 */ 3640 /* XXX endian-ness of seq_st, ba? */ 3641 seq_st = ts.ts_seqnum; 3642 hasba = !! (ts.ts_flags & HAL_TX_BA); 3643 tx_ok = (ts.ts_status == 0); 3644 isaggr = bf_first->bf_state.bfs_aggr; 3645 ba[0] = ts.ts_ba_low; 3646 ba[1] = ts.ts_ba_high; 3647 3648 /* 3649 * Copy the TX completion status and the rate control 3650 * series from the first descriptor, as it may be freed 3651 * before the rate control code can get its grubby fingers 3652 * into things. 3653 */ 3654 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); 3655 3656 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3657 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " 3658 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", 3659 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, 3660 isaggr, seq_st, hasba, ba[0], ba[1]); 3661 3662 /* Occasionally, the MAC sends a tx status for the wrong TID. */ 3663 if (tid != ts.ts_tid) { 3664 device_printf(sc->sc_dev, "%s: tid %d != hw tid %d\n", 3665 __func__, tid, ts.ts_tid); 3666 tx_ok = 0; 3667 } 3668 3669 /* AR5416 BA bug; this requires an interface reset */ 3670 if (isaggr && tx_ok && (! hasba)) { 3671 device_printf(sc->sc_dev, 3672 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " 3673 "seq_st=%d\n", 3674 __func__, hasba, tx_ok, isaggr, seq_st); 3675 /* XXX TODO: schedule an interface reset */ 3676 } 3677 3678 /* 3679 * Walk the list of frames, figure out which ones were correctly 3680 * sent and which weren't. 3681 */ 3682 bf = bf_first; 3683 nf = bf_first->bf_state.bfs_nframes; 3684 3685 /* bf_first is going to be invalid once this list is walked */ 3686 bf_first = NULL; 3687 3688 /* 3689 * Walk the list of completed frames and determine 3690 * which need to be completed and which need to be 3691 * retransmitted. 3692 * 3693 * For completed frames, the completion functions need 3694 * to be called at the end of this function as the last 3695 * node reference may free the node. 
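 *
 * As a worked example of the bitmap check below (hypothetical numbers,
 * assuming the usual bit-per-subframe layout): with seq_st = 100 and a
 * subframe seqno of 103, ba_index is 3 and ATH_BA_ISSET(ba, 3) tests bit
 * 3 of ba[0]; set means the receiver block-acked the subframe and it is
 * completed, clear means it is handed to ath_tx_retry_subframe().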
3696 * 3697 * Finally, since the TXQ lock can't be held during the 3698 * completion callback (to avoid lock recursion), 3699 * the completion calls have to be done outside of the 3700 * lock. 3701 */ 3702 while (bf) { 3703 nframes++; 3704 ba_index = ATH_BA_INDEX(seq_st, 3705 SEQNO(bf->bf_state.bfs_seqno)); 3706 bf_next = bf->bf_next; 3707 bf->bf_next = NULL; /* Remove it from the aggr list */ 3708 3709 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3710 "%s: checking bf=%p seqno=%d; ack=%d\n", 3711 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), 3712 ATH_BA_ISSET(ba, ba_index)); 3713 3714 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { 3715 sc->sc_stats.ast_tx_aggr_ok++; 3716 ath_tx_update_baw(sc, an, atid, bf); 3717 bf->bf_state.bfs_dobaw = 0; 3718 if (! bf->bf_state.bfs_addedbaw) 3719 device_printf(sc->sc_dev, 3720 "%s: wasn't added: seqno %d\n", 3721 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3722 bf->bf_next = NULL; 3723 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3724 } else { 3725 sc->sc_stats.ast_tx_aggr_fail++; 3726 if (ath_tx_retry_subframe(sc, bf, &bf_q)) { 3727 drops++; 3728 bf->bf_next = NULL; 3729 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); 3730 } 3731 nbad++; 3732 } 3733 bf = bf_next; 3734 } 3735 3736 /* 3737 * Now that the BAW updates have been done, unlock 3738 * 3739 * txseq is grabbed before the lock is released so we 3740 * have a consistent view of what -was- in the BAW. 3741 * Anything after this point will not yet have been 3742 * TXed. 3743 */ 3744 txseq = tap->txa_start; 3745 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3746 3747 if (nframes != nf) 3748 device_printf(sc->sc_dev, 3749 "%s: num frames seen=%d; bf nframes=%d\n", 3750 __func__, nframes, nf); 3751 3752 /* 3753 * Now we know how many frames were bad, call the rate 3754 * control code. 3755 */ 3756 if (fail == 0) 3757 ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, 3758 nbad); 3759 3760 /* 3761 * send bar if we dropped any frames 3762 */ 3763 if (drops) { 3764 /* Suspend the TX queue and get ready to send the BAR */ 3765 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3766 ath_tx_tid_bar_suspend(sc, atid); 3767 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3768 } 3769 3770 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3771 "%s: txa_start now %d\n", __func__, tap->txa_start); 3772 3773 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3774 3775 /* Prepend all frames to the beginning of the queue */ 3776 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { 3777 TAILQ_REMOVE(&bf_q, bf, bf_list); 3778 ATH_TXQ_INSERT_HEAD(atid, bf, bf_list); 3779 } 3780 3781 /* 3782 * Reschedule to grab some further frames. 3783 */ 3784 ath_tx_tid_sched(sc, atid); 3785 3786 /* 3787 * Send BAR if required 3788 */ 3789 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3790 ath_tx_tid_bar_tx(sc, atid); 3791 3792 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3793 3794 /* Do deferred completion */ 3795 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3796 TAILQ_REMOVE(&bf_cq, bf, bf_list); 3797 ath_tx_default_comp(sc, bf, 0); 3798 } 3799 } 3800 3801 /* 3802 * Handle completion of unaggregated frames in an ADDBA 3803 * session. 3804 * 3805 * Fail is set to 1 if the entry is being freed via a call to 3806 * ath_tx_draintxq(). 
3807 */ 3808 static void 3809 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) 3810 { 3811 struct ieee80211_node *ni = bf->bf_node; 3812 struct ath_node *an = ATH_NODE(ni); 3813 int tid = bf->bf_state.bfs_tid; 3814 struct ath_tid *atid = &an->an_tid[tid]; 3815 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 3816 3817 /* 3818 * Update rate control status here, before we possibly 3819 * punt to retry or cleanup. 3820 * 3821 * Do it outside of the TXQ lock. 3822 */ 3823 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) 3824 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, 3825 &bf->bf_status.ds_txstat, 3826 bf->bf_state.bfs_pktlen, 3827 1, (ts->ts_status == 0) ? 0 : 1); 3828 3829 /* 3830 * This is called early so atid->hwq_depth can be tracked. 3831 * This unfortunately means that it's released and regrabbed 3832 * during retry and cleanup. That's rather inefficient. 3833 */ 3834 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 3835 3836 if (tid == IEEE80211_NONQOS_TID) 3837 device_printf(sc->sc_dev, "%s: TID=16!\n", __func__); 3838 3839 DPRINTF(sc, ATH_DEBUG_SW_TX, 3840 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", 3841 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, 3842 SEQNO(bf->bf_state.bfs_seqno)); 3843 3844 atid->hwq_depth--; 3845 if (atid->hwq_depth < 0) 3846 device_printf(sc->sc_dev, "%s: hwq_depth < 0: %d\n", 3847 __func__, atid->hwq_depth); 3848 3849 /* 3850 * If a cleanup is in progress, punt to comp_cleanup; 3851 * rather than handling it here. It's thus their 3852 * responsibility to clean up, call the completion 3853 * function in net80211, etc. 3854 */ 3855 if (atid->cleanup_inprogress) { 3856 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3857 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", 3858 __func__); 3859 ath_tx_comp_cleanup_unaggr(sc, bf); 3860 return; 3861 } 3862 3863 /* 3864 * Don't bother with the retry check if all frames 3865 * are being failed (eg during queue deletion.) 3866 */ 3867 #if 0 3868 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { 3869 #endif 3870 if (fail == 0 && ts->ts_status != 0) { 3871 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3872 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", 3873 __func__); 3874 ath_tx_aggr_retry_unaggr(sc, bf); 3875 return; 3876 } 3877 3878 /* Success? Complete */ 3879 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", 3880 __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); 3881 if (bf->bf_state.bfs_dobaw) { 3882 ath_tx_update_baw(sc, an, atid, bf); 3883 bf->bf_state.bfs_dobaw = 0; 3884 if (! bf->bf_state.bfs_addedbaw) 3885 device_printf(sc->sc_dev, 3886 "%s: wasn't added: seqno %d\n", 3887 __func__, SEQNO(bf->bf_state.bfs_seqno)); 3888 } 3889 3890 /* 3891 * Send BAR if required 3892 */ 3893 if (ath_tx_tid_bar_tx_ready(sc, atid)) 3894 ath_tx_tid_bar_tx(sc, atid); 3895 3896 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 3897 3898 ath_tx_default_comp(sc, bf, fail); 3899 /* bf is freed at this point */ 3900 } 3901 3902 void 3903 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 3904 { 3905 if (bf->bf_state.bfs_aggr) 3906 ath_tx_aggr_comp_aggr(sc, bf, fail); 3907 else 3908 ath_tx_aggr_comp_unaggr(sc, bf, fail); 3909 } 3910 3911 /* 3912 * Schedule some packets from the given node/TID to the hardware. 3913 * 3914 * This is the aggregate version. 
3915 */ 3916 void 3917 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, 3918 struct ath_tid *tid) 3919 { 3920 struct ath_buf *bf; 3921 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 3922 struct ieee80211_tx_ampdu *tap; 3923 struct ieee80211_node *ni = &an->an_node; 3924 ATH_AGGR_STATUS status; 3925 ath_bufhead bf_q; 3926 3927 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); 3928 ATH_TXQ_LOCK_ASSERT(txq); 3929 3930 tap = ath_tx_get_tx_tid(an, tid->tid); 3931 3932 if (tid->tid == IEEE80211_NONQOS_TID) 3933 device_printf(sc->sc_dev, "%s: called for TID=NONQOS_TID?\n", 3934 __func__); 3935 3936 for (;;) { 3937 status = ATH_AGGR_DONE; 3938 3939 /* 3940 * If the upper layer has paused the TID, don't 3941 * queue any further packets. 3942 * 3943 * This can also occur from the completion task because 3944 * of packet loss; but as its serialised with this code, 3945 * it won't "appear" half way through queuing packets. 3946 */ 3947 if (tid->paused) 3948 break; 3949 3950 bf = TAILQ_FIRST(&tid->axq_q); 3951 if (bf == NULL) { 3952 break; 3953 } 3954 3955 /* 3956 * If the packet doesn't fall within the BAW (eg a NULL 3957 * data frame), schedule it directly; continue. 3958 */ 3959 if (! bf->bf_state.bfs_dobaw) { 3960 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 3961 "%s: non-baw packet\n", 3962 __func__); 3963 ATH_TXQ_REMOVE(tid, bf, bf_list); 3964 bf->bf_state.bfs_aggr = 0; 3965 ath_tx_do_ratelookup(sc, bf); 3966 ath_tx_calc_duration(sc, bf); 3967 ath_tx_calc_protection(sc, bf); 3968 ath_tx_set_rtscts(sc, bf); 3969 ath_tx_rate_fill_rcflags(sc, bf); 3970 ath_tx_setds(sc, bf); 3971 ath_tx_chaindesclist(sc, bf); 3972 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 3973 ath_tx_set_ratectrl(sc, ni, bf); 3974 3975 sc->sc_aggr_stats.aggr_nonbaw_pkt++; 3976 3977 /* Queue the packet; continue */ 3978 goto queuepkt; 3979 } 3980 3981 TAILQ_INIT(&bf_q); 3982 3983 /* 3984 * Do a rate control lookup on the first frame in the 3985 * list. The rate control code needs that to occur 3986 * before it can determine whether to TX. 3987 * It's inaccurate because the rate control code doesn't 3988 * really "do" aggregate lookups, so it only considers 3989 * the size of the first frame. 3990 */ 3991 ath_tx_do_ratelookup(sc, bf); 3992 bf->bf_state.bfs_rc[3].rix = 0; 3993 bf->bf_state.bfs_rc[3].tries = 0; 3994 3995 ath_tx_calc_duration(sc, bf); 3996 ath_tx_calc_protection(sc, bf); 3997 3998 ath_tx_set_rtscts(sc, bf); 3999 ath_tx_rate_fill_rcflags(sc, bf); 4000 4001 status = ath_tx_form_aggr(sc, an, tid, &bf_q); 4002 4003 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4004 "%s: ath_tx_form_aggr() status=%d\n", __func__, status); 4005 4006 /* 4007 * No frames to be picked up - out of BAW 4008 */ 4009 if (TAILQ_EMPTY(&bf_q)) 4010 break; 4011 4012 /* 4013 * This assumes that the descriptor list in the ath_bufhead 4014 * are already linked together via bf_next pointers. 4015 */ 4016 bf = TAILQ_FIRST(&bf_q); 4017 4018 if (status == ATH_AGGR_8K_LIMITED) 4019 sc->sc_aggr_stats.aggr_rts_aggr_limited++; 4020 4021 /* 4022 * If it's the only frame send as non-aggregate 4023 * assume that ath_tx_form_aggr() has checked 4024 * whether it's in the BAW and added it appropriately. 
4025 */ 4026 if (bf->bf_state.bfs_nframes == 1) { 4027 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4028 "%s: single-frame aggregate\n", __func__); 4029 bf->bf_state.bfs_aggr = 0; 4030 ath_tx_setds(sc, bf); 4031 ath_tx_chaindesclist(sc, bf); 4032 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); 4033 ath_tx_set_ratectrl(sc, ni, bf); 4034 if (status == ATH_AGGR_BAW_CLOSED) 4035 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; 4036 else 4037 sc->sc_aggr_stats.aggr_single_pkt++; 4038 } else { 4039 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 4040 "%s: multi-frame aggregate: %d frames, " 4041 "length %d\n", 4042 __func__, bf->bf_state.bfs_nframes, 4043 bf->bf_state.bfs_al); 4044 bf->bf_state.bfs_aggr = 1; 4045 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; 4046 sc->sc_aggr_stats.aggr_aggr_pkt++; 4047 4048 /* 4049 * Calculate the duration/protection as required. 4050 */ 4051 ath_tx_calc_duration(sc, bf); 4052 ath_tx_calc_protection(sc, bf); 4053 4054 /* 4055 * Update the rate and rtscts information based on the 4056 * rate decision made by the rate control code; 4057 * the first frame in the aggregate needs it. 4058 */ 4059 ath_tx_set_rtscts(sc, bf); 4060 4061 /* 4062 * Setup the relevant descriptor fields 4063 * for aggregation. The first descriptor 4064 * already points to the rest in the chain. 4065 */ 4066 ath_tx_setds_11n(sc, bf); 4067 4068 /* 4069 * setup first desc with rate and aggr info 4070 */ 4071 ath_tx_set_ratectrl(sc, ni, bf); 4072 } 4073 queuepkt: 4074 //txq = bf->bf_state.bfs_txq; 4075 4076 /* Set completion handler, multi-frame aggregate or not */ 4077 bf->bf_comp = ath_tx_aggr_comp; 4078 4079 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) 4080 device_printf(sc->sc_dev, "%s: TID=16?\n", __func__); 4081 4082 /* Punt to txq */ 4083 ath_tx_handoff(sc, txq, bf); 4084 4085 /* Track outstanding buffer count to hardware */ 4086 /* aggregates are "one" buffer */ 4087 tid->hwq_depth++; 4088 4089 /* 4090 * Break out if ath_tx_form_aggr() indicated 4091 * there can't be any further progress (eg BAW is full.) 4092 * Checking for an empty txq is done above. 4093 * 4094 * XXX locking on txq here? 4095 */ 4096 if (txq->axq_aggr_depth >= sc->sc_hwq_limit || 4097 status == ATH_AGGR_BAW_CLOSED) 4098 break; 4099 } 4100 } 4101 4102 /* 4103 * Schedule some packets from the given node/TID to the hardware. 4104 */ 4105 void 4106 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, 4107 struct ath_tid *tid) 4108 { 4109 struct ath_buf *bf; 4110 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; 4111 struct ieee80211_node *ni = &an->an_node; 4112 4113 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", 4114 __func__, an, tid->tid); 4115 4116 ATH_TXQ_LOCK_ASSERT(txq); 4117 4118 /* Check - is AMPDU pending or running? then print out something */ 4119 if (ath_tx_ampdu_pending(sc, an, tid->tid)) 4120 device_printf(sc->sc_dev, "%s: tid=%d, ampdu pending?\n", 4121 __func__, tid->tid); 4122 if (ath_tx_ampdu_running(sc, an, tid->tid)) 4123 device_printf(sc->sc_dev, "%s: tid=%d, ampdu running?\n", 4124 __func__, tid->tid); 4125 4126 for (;;) { 4127 4128 /* 4129 * If the upper layers have paused the TID, don't 4130 * queue any further packets. 4131 */ 4132 if (tid->paused) 4133 break; 4134 4135 bf = TAILQ_FIRST(&tid->axq_q); 4136 if (bf == NULL) { 4137 break; 4138 } 4139 4140 ATH_TXQ_REMOVE(tid, bf, bf_list); 4141 4142 KASSERT(txq == bf->bf_state.bfs_txq, ("txqs not equal!\n")); 4143 4144 /* Sanity check! 
		 */
		if (tid->tid != bf->bf_state.bfs_tid) {
			device_printf(sc->sc_dev, "%s: bfs_tid %d !="
			    " tid %d\n",
			    __func__, bf->bf_state.bfs_tid, tid->tid);
		}
		/* Normal completion handler */
		bf->bf_comp = ath_tx_normal_comp;

		/* Program descriptors + rate control */
		ath_tx_do_ratelookup(sc, bf);
		ath_tx_calc_duration(sc, bf);
		ath_tx_calc_protection(sc, bf);
		ath_tx_set_rtscts(sc, bf);
		ath_tx_rate_fill_rcflags(sc, bf);
		ath_tx_setds(sc, bf);
		ath_tx_chaindesclist(sc, bf);
		ath_tx_set_ratectrl(sc, ni, bf);

		/* Track outstanding buffer count to hardware */
		/* aggregates are "one" buffer */
		tid->hwq_depth++;

		/* Punt to hardware or software txq */
		ath_tx_handoff(sc, txq, bf);
	}
}

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid, *next, *last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Don't schedule if the hardware queue is busy.
	 * This (hopefully) gives some more time to aggregate
	 * some packets in the aggregation queue.
	 */
	if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
		/*
		 * Suspend paused queues here; they'll be resumed
		 * once the addba completes or times out.
		 */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
		    __func__, tid->tid, tid->paused);
		ath_tx_tid_unsched(sc, tid);
		if (tid->paused) {
			continue;
		}
		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
		else
			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

		/* Not empty? Re-schedule */
		if (tid->axq_depth != 0)
			ath_tx_tid_sched(sc, tid);

		/* Give the software queue time to aggregate more packets */
		if (txq->axq_aggr_depth >= sc->sc_hwq_limit) {
			break;
		}

		/*
		 * If this was the last entry on the original list, stop.
		 * Otherwise nodes that have been rescheduled onto the end
		 * of the TID FIFO list will just keep being rescheduled.
		 */
		if (tid == last)
			break;
	}
}

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not running */

	return !!
	    (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
	struct ieee80211_tx_ampdu *tap;

	if (tid == IEEE80211_NONQOS_TID)
		return 0;

	tap = ath_tx_get_tx_tid(an, tid);
	if (tap == NULL)
		return 0;	/* Not valid; default to not pending */

	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}

/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	/*
	 * XXX danger Will Robinson!
	 *
	 * Although the taskqueue may be running and scheduling some more
	 * packets, these should all be _before_ the addba sequence number.
	 * However, net80211 will keep self-assigning sequence numbers
	 * until addba has been negotiated.
	 *
	 * In the past, these packets would be "paused" (which still works
	 * fine, as they're being scheduled to the driver in the same
	 * serialised method which is calling the addba request routine)
	 * and when the aggregation session begins, they'll be dequeued
	 * as aggregate packets and added to the BAW. However, now there's
	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
	 * packets. Thus they never get included in the BAW tracking and
	 * this can cause the initial burst of packets after the addba
	 * negotiation to "hang", as they quickly fall outside the BAW.
	 *
	 * The "eventual" solution should be to tag these packets with
	 * dobaw. Although net80211 has given us a sequence number,
	 * it'll be "after" the left edge of the BAW and thus it'll
	 * fall within it.
	 */
	ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]);
	/*
	 * This is a bit annoying. Until the net80211 HT code inherits some
	 * (any) locking, we may have this called in parallel BUT only
	 * one response/timeout will be called. Grr.
	 */
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
	    __func__, dialogtoken, baparamset, batimeout);
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be "aggregate" (whether
 * aggregate or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated. This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
4372 * 4373 * So, being very dirty, the BAW left edge is "slid" here to match 4374 * ni->ni_txseq. 4375 * 4376 * What likely SHOULD happen is that all packets subsequent to the 4377 * addba request should be tagged as aggregate and queued as non-aggregate 4378 * frames; thus updating the BAW. For now though, I'll just slide the 4379 * window. 4380 */ 4381 int 4382 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 4383 int status, int code, int batimeout) 4384 { 4385 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4386 int tid = tap->txa_tid; 4387 struct ath_node *an = ATH_NODE(ni); 4388 struct ath_tid *atid = &an->an_tid[tid]; 4389 int r; 4390 4391 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4392 "%s: called; status=%d, code=%d, batimeout=%d\n", __func__, 4393 status, code, batimeout); 4394 4395 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4396 "%s: txa_start=%d, ni_txseqs=%d\n", 4397 __func__, tap->txa_start, ni->ni_txseqs[tid]); 4398 4399 /* 4400 * Call this first, so the interface flags get updated 4401 * before the TID is unpaused. Otherwise a race condition 4402 * exists where the unpaused TID still doesn't yet have 4403 * IEEE80211_AGGR_RUNNING set. 4404 */ 4405 r = sc->sc_addba_response(ni, tap, status, code, batimeout); 4406 4407 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 4408 atid->addba_tx_pending = 0; 4409 /* 4410 * XXX dirty! 4411 * Slide the BAW left edge to wherever net80211 left it for us. 4412 * Read above for more information. 4413 */ 4414 tap->txa_start = ni->ni_txseqs[tid]; 4415 ath_tx_tid_resume(sc, atid); 4416 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4417 return r; 4418 } 4419 4420 4421 /* 4422 * Stop ADDBA on a queue. 4423 */ 4424 void 4425 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) 4426 { 4427 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4428 int tid = tap->txa_tid; 4429 struct ath_node *an = ATH_NODE(ni); 4430 struct ath_tid *atid = &an->an_tid[tid]; 4431 4432 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: called\n", __func__); 4433 4434 /* Pause TID traffic early, so there aren't any races */ 4435 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 4436 ath_tx_tid_pause(sc, atid); 4437 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4438 4439 /* There's no need to hold the TXQ lock here */ 4440 sc->sc_addba_stop(ni, tap); 4441 4442 /* 4443 * ath_tx_tid_cleanup will resume the TID if possible, otherwise 4444 * it'll set the cleanup flag, and it'll be unpaused once 4445 * things have been cleaned up. 4446 */ 4447 ath_tx_tid_cleanup(sc, an, tid); 4448 } 4449 4450 /* 4451 * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 4452 * it simply tears down the aggregation session. Ew. 4453 * 4454 * It however will call ieee80211_ampdu_stop() which will call 4455 * ic->ic_addba_stop(). 4456 * 4457 * XXX This uses a hard-coded max BAR count value; the whole 4458 * XXX BAR TX success or failure should be better handled! 
4459 */ 4460 void 4461 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 4462 int status) 4463 { 4464 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4465 int tid = tap->txa_tid; 4466 struct ath_node *an = ATH_NODE(ni); 4467 struct ath_tid *atid = &an->an_tid[tid]; 4468 int attempts = tap->txa_attempts; 4469 4470 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 4471 "%s: called; tap=%p, atid=%p, txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n", 4472 __func__, 4473 tap, 4474 atid, 4475 tap->txa_tid, 4476 atid->tid, 4477 status, 4478 attempts); 4479 4480 /* Note: This may update the BAW details */ 4481 sc->sc_bar_response(ni, tap, status); 4482 4483 /* Unpause the TID */ 4484 /* 4485 * XXX if this is attempt=50, the TID will be downgraded 4486 * XXX to a non-aggregate session. So we must unpause the 4487 * XXX TID here or it'll never be done. 4488 */ 4489 if (status == 0 || attempts == 50) { 4490 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 4491 ath_tx_tid_bar_unsuspend(sc, atid); 4492 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4493 } 4494 } 4495 4496 /* 4497 * This is called whenever the pending ADDBA request times out. 4498 * Unpause and reschedule the TID. 4499 */ 4500 void 4501 ath_addba_response_timeout(struct ieee80211_node *ni, 4502 struct ieee80211_tx_ampdu *tap) 4503 { 4504 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4505 int tid = tap->txa_tid; 4506 struct ath_node *an = ATH_NODE(ni); 4507 struct ath_tid *atid = &an->an_tid[tid]; 4508 4509 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 4510 "%s: called; resuming\n", __func__); 4511 4512 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 4513 atid->addba_tx_pending = 0; 4514 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4515 4516 /* Note: This updates the aggregate state to (again) pending */ 4517 sc->sc_addba_response_timeout(ni, tap); 4518 4519 /* Unpause the TID; which reschedules it */ 4520 ATH_TXQ_LOCK(sc->sc_ac2q[atid->ac]); 4521 ath_tx_tid_resume(sc, atid); 4522 ATH_TXQ_UNLOCK(sc->sc_ac2q[atid->ac]); 4523 } 4524