/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>		/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_tx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

/*
 * XXX doesn't belong here, and should be tunable
 */
#define	ATH_TXSTATUS_RING_SIZE	512

MALLOC_DECLARE(M_ATHDEV);

static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);

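/*
 * NB: a quick summary of the FIFO bookkeeping used below (this restates
 * the scheme the code implements rather than introducing anything new):
 *
 * Each entry pushed into the hardware TX FIFO is delimited in the
 * per-TXQ staging list (txq->fifo) by ATH_BUF_FIFOPTR on its first
 * buffer and ATH_BUF_FIFOEND on its last buffer; txq->axq_fifo_depth
 * counts FIFO entries (not frames) and must never exceed
 * HAL_TXFIFO_DEPTH.  At the moment each FIFO entry is a single frame,
 * so both flags end up on the same buffer.
 *
 * TX completion status for all queues is delivered through a single
 * shared status ring (ATH_TXSTATUS_RING_SIZE entries); the queue ID in
 * each status entry identifies which TXQ the completion belongs to.
 */
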
/*
 * Push some frames into the TX FIFO if we have space.
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_buf *bf, *bf_last;
        int i = 0;

        ATH_TXQ_LOCK_ASSERT(txq);

        DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
            __func__,
            txq->axq_qnum);

        TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
                if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
                        break;

                /*
                 * We have space in the FIFO - so let's push a frame
                 * into it.
                 */

                /*
                 * Remove it from the normal list
                 */
                ATH_TXQ_REMOVE(txq, bf, bf_list);

                /*
                 * XXX for now, we only dequeue a frame at a time, so
                 * that's only one buffer.  Later on when we just
                 * push this staging _list_ into the queue, we'll
                 * set bf_last to the end pointer in the list.
                 */
                bf_last = bf;
                DPRINTF(sc, ATH_DEBUG_TX_PROC,
                    "%s: Q%d: depth=%d; pushing %p->%p\n",
                    __func__,
                    txq->axq_qnum,
                    txq->axq_fifo_depth,
                    bf,
                    bf_last);

                /*
                 * Append it to the FIFO staging list
                 */
                ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);

                /*
                 * Set fifo start / fifo end flags appropriately
                 */
                bf->bf_flags |= ATH_BUF_FIFOPTR;
                bf_last->bf_flags |= ATH_BUF_FIFOEND;

                /*
                 * Push _into_ the FIFO.
                 */
                ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef ATH_DEBUG
                if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
                        ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
                if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
                        ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
                txq->axq_fifo_depth++;
                i++;
        }
        if (i > 0)
                ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_buf *bf;
        int i = 0;
        int fifostart = 1;
        int old_fifo_depth;

        DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
            __func__,
            txq->axq_qnum);

        ATH_TXQ_LOCK_ASSERT(txq);

        /*
         * Let's log if the tracked FIFO depth doesn't match
         * what we actually push in.
         */
        old_fifo_depth = txq->axq_fifo_depth;
        txq->axq_fifo_depth = 0;

        /*
         * Walk the FIFO staging list, looking for "head" entries.
         * Since we may have a partially completed list of frames,
         * we push the first frame we see into the FIFO and re-mark
         * it as the head entry.  We then skip entries until we see
         * FIFO end, at which point we get ready to push another
         * entry into the FIFO.
         */
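        /*
         * For example, if the staging list were left as
         * [B (end)] [C (ptr) D E (end)] because the original head of
         * the first FIFO entry had already completed, then B would be
         * re-marked as a FIFO head and pushed, C would be pushed, and
         * D/E would be skipped - they're part of C's FIFO entry rather
         * than being entries of their own.
         */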
        TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
                /*
                 * If we're looking for FIFOEND and we haven't found
                 * it, skip.
                 *
                 * If we're looking for FIFOEND and we've found it,
                 * reset for another descriptor.
                 */
#ifdef ATH_DEBUG
                if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
                        ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
                if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
                        ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */

                if (fifostart == 0) {
                        if (bf->bf_flags & ATH_BUF_FIFOEND)
                                fifostart = 1;
                        continue;
                }

                /* Make sure we're not overflowing the FIFO! */
                if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
                        device_printf(sc->sc_dev,
                            "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
                            __func__,
                            txq->axq_qnum,
                            txq->axq_fifo_depth);
                }

#if 0
                DPRINTF(sc, ATH_DEBUG_RESET,
                    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
                    __func__,
                    txq->axq_qnum,
                    txq->axq_fifo_depth,
                    bf,
                    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
                    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

                /*
                 * Set this to be the first buffer in the FIFO
                 * list - even if it's also the last buffer in
                 * a FIFO list!
                 */
                bf->bf_flags |= ATH_BUF_FIFOPTR;

                /* Push it into the FIFO and bump the FIFO count */
                ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
                txq->axq_fifo_depth++;

                /*
                 * If this isn't the last entry, clear fifostart so
                 * we keep looking for the end of this FIFO entry.
                 */
                if (! (bf->bf_flags & ATH_BUF_FIFOEND))
                        fifostart = 0;
                i++;
        }

        /* Only bother starting the queue if there's something in it */
        if (i > 0)
                ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

        DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
            __func__,
            txq->axq_qnum,
            old_fifo_depth,
            txq->axq_fifo_depth);

        /* And now, let's check! */
        if (txq->axq_fifo_depth != old_fifo_depth) {
                device_printf(sc->sc_dev,
                    "%s: Q%d: FIFO depth should be %d, is %d\n",
                    __func__,
                    txq->axq_qnum,
                    old_fifo_depth,
                    txq->axq_fifo_depth);
        }
}

/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

        ATH_TXQ_LOCK(txq);

        KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
            ("%s: busy status 0x%x", __func__, bf->bf_flags));

        /*
         * XXX TODO: write a hard-coded check to ensure that
         * the queue id in the TX descriptor matches txq->axq_qnum.
         */

        /* Update aggr stats */
        if (bf->bf_state.bfs_aggr)
                txq->axq_aggr_depth++;

        /* Push and update frame stats */
        ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

        /*
         * For now, set the link pointer in the last descriptor
         * to be NULL.
         *
         * Later on, when it comes time to handle multiple descriptors
         * in one FIFO push, we can link descriptors together this way.
         */

        /*
         * Finally, call the FIFO schedule routine to schedule some
         * frames to the FIFO.
         */
        ath_edma_tx_fifo_fill(sc, txq);
        ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

        ATH_TX_LOCK_ASSERT(sc);
        KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
            ("%s: busy status 0x%x", __func__, bf->bf_flags));

        ATH_TXQ_LOCK(txq);
        /*
         * XXX this is mostly duplicated in ath_tx_handoff_mcast().
         */
        if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
                struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
                struct ieee80211_frame *wh;

                /* mark previous frame */
                wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
                wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

                /* re-sync buffer to memory */
                bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
                    BUS_DMASYNC_PREWRITE);

                /* link descriptor */
                ath_hal_settxdesclink(sc->sc_ah,
                    bf_last->bf_lastds,
                    bf->bf_daddr);
        }
#ifdef ATH_DEBUG_ALQ
        if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
                ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
        ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
        ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to the hardware.
 *
 * For the multicast queue, this will treat it as a software queue
 * and append it to the list, after updating the MORE_DATA flag
 * in the previous frame.  The cabq processing code will ensure
 * that the queue contents get transferred over.
 *
 * For the hardware queues, this will queue a frame to the queue
 * like before, then populate the FIFO from that.  Since the
 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
 * frames such as management frames don't get prematurely dropped.
 *
 * This does imply that a similar flush-hwq-to-fifoq method will
 * need to be called from the processq function, before the
 * per-node software scheduler is called.
 */
static void
ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

        DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
            "%s: called; bf=%p, txq=%p, qnum=%d\n",
            __func__,
            bf,
            txq,
            txq->axq_qnum);

        if (txq->axq_qnum == ATH_TXQ_SWQ)
                ath_edma_xmit_handoff_mcast(sc, txq, bf);
        else
                ath_edma_xmit_handoff_hw(sc, txq, bf);
}

/*
 * Allocate and initialise the per-TXQ FIFO tracking state.
 */
static int
ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
{
        struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

        te->m_fifo = malloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
            M_ATHDEV,
            M_NOWAIT | M_ZERO);
        if (te->m_fifo == NULL) {
                device_printf(sc->sc_dev, "%s: malloc failed\n",
                    __func__);
                return (-ENOMEM);
        }

        /*
         * Set initial "empty" state.
         */
        te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;

        return (0);
}

/*
 * Free the per-TXQ FIFO tracking state.
 */
static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
        struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

        /* XXX TODO: actually deref the ath_buf entries? */
        free(te->m_fifo, M_ATHDEV);
        return (0);
}

/*
 * Allocate the TX completion status ring, hand it to the HAL and
 * set up the per-TXQ FIFO state.
 */
static int
ath_edma_dma_txsetup(struct ath_softc *sc)
{
        int error;
        int i;

        error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
            NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
        if (error != 0)
                return (error);

        ath_hal_setuptxstatusring(sc->sc_ah,
            (void *) sc->sc_txsdma.dd_desc,
            sc->sc_txsdma.dd_desc_paddr,
            ATH_TXSTATUS_RING_SIZE);

        for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                ath_edma_setup_txfifo(sc, i);
        }

        return (0);
}

/*
 * Tear down the TX completion status ring and the per-TXQ FIFO state.
 */
static int
ath_edma_dma_txteardown(struct ath_softc *sc)
{
        int i;

        for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                ath_edma_free_txfifo(sc, i);
        }

        ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
        return (0);
}

/*
 * Drain all TXQs, potentially after completing the existing completed
 * frames.
 */
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
        struct ifnet *ifp = sc->sc_ifp;
        int i;

        DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

        (void) ath_stoptxdma(sc);

        /*
         * If reset type is noloss, the TX FIFO needs to be serviced
         * and those frames need to be handled.
         *
         * Otherwise, just toss everything in each TX queue.
         */
        if (reset_type == ATH_RESET_NOLOSS) {
                ath_edma_tx_processq(sc, 0);
                for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i)) {
                                ATH_TXQ_LOCK(&sc->sc_txq[i]);
                                /*
                                 * Free the holding buffer; DMA is now
                                 * stopped.
                                 */
                                ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
                                /*
                                 * Reset the link pointer to NULL; there are
                                 * no frames to chain DMA to.
                                 */
                                sc->sc_txq[i].axq_link = NULL;
                                ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
                        }
                }
        } else {
                for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i))
                                ath_tx_draintxq(sc, &sc->sc_txq[i]);
                }
        }

        /* XXX dump out the TX completion FIFO contents */

        /* XXX dump out the frames */

        IF_LOCK(&ifp->if_snd);
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        IF_UNLOCK(&ifp->if_snd);
        sc->sc_wd_timer = 0;
}

/*
 * TX completion tasklet.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
        struct ath_softc *sc = (struct ath_softc *) arg;

#if 0
        DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
            __func__, npending);
#endif
        ath_edma_tx_processq(sc, 1);
}

/*
 * Process the TX status queue.
 */
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
        struct ath_hal *ah = sc->sc_ah;
        HAL_STATUS status;
        struct ath_tx_status ts;
        struct ath_txq *txq;
        struct ath_buf *bf;
        struct ieee80211_node *ni;
        int nacked = 0;
        int idx;

#ifdef ATH_DEBUG
        /* XXX */
        uint32_t txstatus[32];
#endif

        for (idx = 0; ; idx++) {
                bzero(&ts, sizeof(ts));

                ATH_TXSTATUS_LOCK(sc);
#ifdef ATH_DEBUG
                ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
                status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
                ATH_TXSTATUS_UNLOCK(sc);

                if (status == HAL_EINPROGRESS)
                        break;

#ifdef ATH_DEBUG
                if (sc->sc_debug & ATH_DEBUG_TX_PROC)
                        if (ts.ts_queue_id != sc->sc_bhalq)
                                ath_printtxstatbuf(sc, NULL, txstatus,
                                    ts.ts_queue_id, idx, (status == HAL_OK));
#endif

                /*
                 * If there is an error with this descriptor, stop
                 * processing for now.
                 *
                 * XXX TBD: log some statistics?
                 */
                if (status == HAL_EIO) {
                        device_printf(sc->sc_dev, "%s: invalid TX status?\n",
                            __func__);
                        break;
                }

#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
                if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
                        if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
                            sc->sc_tx_statuslen,
                            (char *) txstatus);
#endif /* ATH_DEBUG_ALQ */

                /*
                 * At this point we have a valid status descriptor.
                 * The QID and descriptor ID (which currently isn't set)
                 * are part of the status.
                 *
                 * We then assume that the descriptor in question is the
                 * -head- of the given QID.  Eventually we should verify
                 * this by using the descriptor ID.
                 */

                /*
                 * The beacon queue is not currently a "real" queue.
                 * Frames aren't pushed onto it and the lock isn't set up.
                 * So skip it for now; the beacon handling code will
                 * free and alloc more beacon buffers as appropriate.
                 */
                if (ts.ts_queue_id == sc->sc_bhalq)
                        continue;

                txq = &sc->sc_txq[ts.ts_queue_id];

                ATH_TXQ_LOCK(txq);
                bf = ATH_TXQ_FIRST(&txq->fifo);

                /*
                 * Work around the situation where I'm seeing notifications
                 * for Q1 when no frames are available.  That needs to be
                 * debugged but not by crashing _here_.
                 */
                if (bf == NULL) {
                        device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
                            __func__,
                            ts.ts_queue_id);
                        ATH_TXQ_UNLOCK(txq);
                        continue;
                }

                DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
                    __func__,
                    ts.ts_queue_id, bf,
                    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
                    !! (bf->bf_flags & ATH_BUF_FIFOEND));

                /* XXX TODO: actually output debugging info about this */

#if 0
                /* XXX assert the buffer/descriptor matches the status descid */
                if (ts.ts_desc_id != bf->bf_descid) {
                        device_printf(sc->sc_dev,
                            "%s: mismatched descid (qid=%d, tsdescid=%d, "
                            "bfdescid=%d\n",
                            __func__,
                            ts.ts_queue_id,
                            ts.ts_desc_id,
                            bf->bf_descid);
                }
#endif

                /* This removes the buffer and decrements the queue depth */
                ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
                if (bf->bf_state.bfs_aggr)
                        txq->axq_aggr_depth--;

                /*
                 * If this was the end of a FIFO set, decrement FIFO depth
                 */
                if (bf->bf_flags & ATH_BUF_FIFOEND)
                        txq->axq_fifo_depth--;

                /*
                 * If this isn't the final buffer in a FIFO set, mark
                 * the buffer as busy so it goes onto the holding queue.
                 */
                if (! (bf->bf_flags & ATH_BUF_FIFOEND))
                        bf->bf_flags |= ATH_BUF_BUSY;

                DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
                    __func__,
                    txq->axq_qnum,
                    txq->axq_fifo_depth,
                    txq->fifo.axq_depth);

                /* XXX assert FIFO depth >= 0 */
                ATH_TXQ_UNLOCK(txq);

                /*
                 * Outside of the TX lock - if the buffer is the last
                 * buffer in this FIFO entry, we don't need a holding
                 * buffer any longer.
                 */
                if (bf->bf_flags & ATH_BUF_FIFOEND) {
                        ATH_TXQ_LOCK(txq);
                        ath_txq_freeholdingbuf(sc, txq);
                        ATH_TXQ_UNLOCK(txq);
                }

                /*
                 * First we need to make sure ts_rate is valid.
                 *
                 * Pre-EDMA chips pass the whole TX descriptor to
                 * the proctxdesc function which will then fill out
                 * ts_rate based on the ts_finaltsi (final TX index)
                 * in the TX descriptor.  However the TX completion
                 * FIFO doesn't have this information.  So here we
                 * do a separate HAL call to populate that information.
                 *
                 * The same problem exists with ts_longretry.
                 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
                 * the AR9380 HAL currently doesn't.  So until the HAL
                 * is imported and this can be added, we correct for it
                 * here.
                 */
                /* XXX TODO */
                /* XXX faked for now.  Ew. */
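                /*
                 * e.g. if ts_finaltsi == 2, the frame went out using
                 * bfs_rc[2].ratecode after the tries for bfs_rc[0] and
                 * bfs_rc[1] were exhausted, so those try counts are
                 * accumulated into ts_longretry below.
                 */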
                if (ts.ts_finaltsi < 4) {
                        ts.ts_rate =
                            bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
                        switch (ts.ts_finaltsi) {
                        case 3: ts.ts_longretry +=
                            bf->bf_state.bfs_rc[2].tries;
                                /* FALLTHROUGH */
                        case 2: ts.ts_longretry +=
                            bf->bf_state.bfs_rc[1].tries;
                                /* FALLTHROUGH */
                        case 1: ts.ts_longretry +=
                            bf->bf_state.bfs_rc[0].tries;
                        }
                } else {
                        device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
                            __func__,
                            ts.ts_finaltsi);
                        ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
                }

                /*
                 * XXX This is terrible.
                 *
                 * Right now, some code uses the TX status that is
                 * passed in here, but the completion handlers in the
                 * software TX path also use bf_status.ds_txstat.
                 * Ew.  That should all go away.
                 *
                 * XXX It's also possible the rate control completion
                 * routine is called twice.
                 */
                memcpy(&bf->bf_status, &ts, sizeof(ts));

                ni = bf->bf_node;

                /* Update RSSI */
                /* XXX duplicate from ath_tx_processq */
                if (ni != NULL && ts.ts_status == 0 &&
                    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
                        nacked++;
                        sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
                        ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
                            ts.ts_rssi);
                }

                /* Handle frame completion and rate control update */
                ath_tx_process_buf_completion(sc, txq, &ts, bf);

                /* bf is invalid at this point */

                /*
                 * Now that there's space in the FIFO, let's push some
                 * more frames into it.
                 */
                ATH_TXQ_LOCK(txq);
                if (dosched)
                        ath_edma_tx_fifo_fill(sc, txq);
                ATH_TXQ_UNLOCK(txq);
        }

        sc->sc_wd_timer = 0;

        if (idx > 0) {
                IF_LOCK(&sc->sc_ifp->if_snd);
                sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                IF_UNLOCK(&sc->sc_ifp->if_snd);
        }

        /* Kick software scheduler */
        /*
         * XXX It's inefficient to do this if the FIFO queue is full,
         * but there's no easy way right now to only populate
         * the txq task for _one_ TXQ.  This should be fixed.
         */
        if (dosched)
                ath_tx_swq_kick(sc);
}

/*
 * Attach the EDMA TX completion handler to the TX task.
 */
static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

        TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}

/*
 * Set up the EDMA transmit methods for this softc.
 */
void
ath_xmit_setup_edma(struct ath_softc *sc)
{

        /* Fetch EDMA field and buffer sizes */
        (void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
        (void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
        (void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);

        device_printf(sc->sc_dev, "TX descriptor length: %d\n",
            sc->sc_tx_desclen);
        device_printf(sc->sc_dev, "TX status length: %d\n",
            sc->sc_tx_statuslen);
        device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
            sc->sc_tx_nmaps);

        sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
        sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
        sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;

        sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
        sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
        sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}