/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_tx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * Some general macros.
 *
 * Note: these assume (_sz) is a power of two, since they mask
 * with ((_sz) - 1) rather than using a modulo operation.
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

/*
 * XXX doesn't belong here, and should be tunable
 */
#define	ATH_TXSTATUS_RING_SIZE	512

MALLOC_DECLARE(M_ATHDEV);

static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);

/*
 * Push some frames into the TX FIFO if we have space.
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;
	int i = 0;

	ATH_TXQ_LOCK_ASSERT(txq);

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
			break;

		/*
		 * We have space in the FIFO - so let's push a frame
		 * into it.
		 */

		/*
		 * Remove it from the normal list
		 */
		ATH_TXQ_REMOVE(txq, bf, bf_list);

		/*
		 * XXX for now, we only dequeue a frame at a time, so
		 * that's only one buffer.  Later on when we just
		 * push this staging _list_ into the queue, we'll
		 * set bf_last to the end pointer in the list.
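		 *
		 * Since only a single frame is pushed at a time here,
		 * bf and bf_last refer to the same buffer, so both the
		 * FIFOPTR and FIFOEND flags below land on that one
		 * buffer.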
		 */
		bf_last = bf;
		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d: depth=%d; pushing %p->%p\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    bf_last);

		/*
		 * Append it to the FIFO staging list
		 */
		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);

		/*
		 * Set fifo start / fifo end flags appropriately
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;
		bf_last->bf_flags |= ATH_BUF_FIFOEND;

		/*
		 * Push _into_ the FIFO.
		 */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
		txq->axq_fifo_depth++;
		i++;
	}
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;
	int i = 0;
	int fifostart = 1;
	int old_fifo_depth;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Let's log if the tracked FIFO depth doesn't match
	 * what we actually push in.
	 */
	old_fifo_depth = txq->axq_fifo_depth;
	txq->axq_fifo_depth = 0;

	/*
	 * Walk the FIFO staging list, looking for "head" entries.
	 * Since we may have a partially completed list of frames,
	 * we push the first frame we see into the FIFO and re-mark
	 * it as the head entry.  We then skip entries until we see
	 * FIFO end, at which point we get ready to push another
	 * entry into the FIFO.
	 */
	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
		/*
		 * If we're looking for FIFOEND and we haven't found
		 * it, skip.
		 *
		 * If we're looking for FIFOEND and we've found it,
		 * reset for another descriptor.
		 */
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */

		if (fifostart == 0) {
			if (bf->bf_flags & ATH_BUF_FIFOEND)
				fifostart = 1;
			continue;
		}

		/* Make sure we're not overflowing the FIFO! */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
			    __func__,
			    txq->axq_qnum,
			    txq->axq_fifo_depth);
		}

#if 0
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

		/*
		 * Set this to be the first buffer in the FIFO
		 * list - even if it's also the last buffer in
		 * a FIFO list!
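		 *
		 * This is the "re-mark as head" step described above:
		 * if the original head of this FIFO set has already
		 * completed, the first remaining buffer becomes the
		 * new head that gets pushed to the hardware.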
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;

		/* Push it into the FIFO and bump the FIFO count */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
		txq->axq_fifo_depth++;

		/*
		 * If this isn't the last entry either, let's
		 * clear fifostart so we continue looking for
		 * said last entry.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			fifostart = 0;
		i++;
	}

	/* Only bother starting the queue if there's something in it */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
	    __func__,
	    txq->axq_qnum,
	    old_fifo_depth,
	    txq->axq_fifo_depth);

	/* And now, let's check! */
	if (txq->axq_fifo_depth != old_fifo_depth) {
		device_printf(sc->sc_dev,
		    "%s: Q%d: FIFO depth should be %d, is %d\n",
		    __func__,
		    txq->axq_qnum,
		    old_fifo_depth,
		    txq->axq_fifo_depth);
	}
}

/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TXQ_LOCK(txq);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * XXX TODO: write a hard-coded check to ensure that
	 * the queue id in the TX descriptor matches txq->axq_qnum.
	 */

	/* Update aggr stats */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/* Push and update frame stats */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

	/*
	 * For now, set the link pointer in the last descriptor
	 * to be NULL.
	 *
	 * Later on, when it comes time to handle multiple descriptors
	 * in one FIFO push, we can link descriptors together this way.
	 */

	/*
	 * Finally, call the FIFO schedule routine to schedule some
	 * frames to the FIFO.
	 */
	ath_edma_tx_fifo_fill(sc, txq);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	/*
	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
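	 *
	 * The previous frame (if any) gets IEEE80211_FC1_MORE_DATA set
	 * and its last descriptor linked to this buffer, so the
	 * multicast queue stays a single chained list for the CABQ.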
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

		/* re-sync buffer to memory */
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to the hardware.
 *
 * For the multicast queue, this will treat it as a software queue
 * and append it to the list, after updating the MORE_DATA flag
 * in the previous frame.  The cabq processing code will ensure
 * that the queue contents get transferred over.
 *
 * For the hardware queues, this will queue a frame to the queue
 * like before, then populate the FIFO from that.  Since the
 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
 * frames such as management frames don't get prematurely dropped.
 *
 * This does imply that a similar flush-hwq-to-fifoq method will
 * need to be called from the processq function, before the
 * per-node software scheduler is called.
 */
static void
ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
	    __func__,
	    bf,
	    txq,
	    txq->axq_qnum);

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_edma_xmit_handoff_mcast(sc, txq, bf);
	else
		ath_edma_xmit_handoff_hw(sc, txq, bf);
}

static int
ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	te->m_fifo = malloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (te->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	/* XXX TODO: actually deref the ath_buf entries? */
	free(te->m_fifo, M_ATHDEV);
	return (0);
}

static int
ath_edma_dma_txsetup(struct ath_softc *sc)
{
	int error;
	int i;

	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
	if (error != 0)
		return (error);

	ath_hal_setuptxstatusring(sc->sc_ah,
	    (void *) sc->sc_txsdma.dd_desc,
	    sc->sc_txsdma.dd_desc_paddr,
	    ATH_TXSTATUS_RING_SIZE);

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_setup_txfifo(sc, i);
	}

	return (0);
}

static int
ath_edma_dma_txteardown(struct ath_softc *sc)
{
	int i;

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_free_txfifo(sc, i);
	}

	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
	return (0);
}

/*
 * Drain all TXQs, potentially after completing any already-completed
 * frames.
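 *
 * For a no-loss reset (ATH_RESET_NOLOSS) the TX completion FIFO is
 * serviced first so already-completed frames are accounted for; for
 * any other reset type the TX queue contents are simply drained.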
 */
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	(void) ath_stoptxdma(sc);

	/*
	 * If reset type is noloss, the TX FIFO needs to be serviced
	 * and those frames need to be handled.
	 *
	 * Otherwise, just toss everything in each TX queue.
	 */
	if (reset_type == ATH_RESET_NOLOSS) {
		ath_edma_tx_processq(sc, 0);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				/*
				 * Free the holding buffer; DMA is now
				 * stopped.
				 */
				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
				/*
				 * Reset the link pointer to NULL; there are
				 * no frames to chain DMA to.
				 */
				sc->sc_txq[i].axq_link = NULL;
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	} else {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}

	/* XXX dump out the TX completion FIFO contents */

	/* XXX dump out the frames */

	sc->sc_wd_timer = 0;
}

/*
 * TX completion task.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;

#if 0
	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
	    __func__, npending);
#endif
	ath_edma_tx_processq(sc, 1);
}

/*
 * Process the TX status queue.
 */
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	struct ath_tx_status ts;
	struct ath_txq *txq;
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int nacked = 0;
	int idx;

#ifdef ATH_DEBUG
	/* XXX */
	uint32_t txstatus[32];
#endif

	for (idx = 0; ; idx++) {
		bzero(&ts, sizeof(ts));

		ATH_TXSTATUS_LOCK(sc);
#ifdef ATH_DEBUG
		ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
		ATH_TXSTATUS_UNLOCK(sc);

		if (status == HAL_EINPROGRESS)
			break;

#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
			if (ts.ts_queue_id != sc->sc_bhalq)
				ath_printtxstatbuf(sc, NULL, txstatus,
				    ts.ts_queue_id, idx, (status == HAL_OK));
#endif

		/*
		 * If there is an error with this descriptor, stop
		 * processing here.
		 *
		 * XXX TBD: log some statistics?
		 */
		if (status == HAL_EIO) {
			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
			    __func__);
			break;
		}

#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
			    sc->sc_tx_statuslen,
			    (char *) txstatus);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * At this point we have a valid status descriptor.
		 * The QID and descriptor ID (which currently isn't set)
		 * are part of the status.
		 *
		 * We then assume that the descriptor in question is the
		 * -head- of the given QID.  Eventually we should verify
		 * this by using the descriptor ID.
		 */

		/*
		 * The beacon queue is not currently a "real" queue.
		 * Frames aren't pushed onto it and the lock isn't setup.
		 * So skip it for now; the beacon handling code will
		 * free and alloc more beacon buffers as appropriate.
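		 *
		 * Status entries for the beacon queue are therefore
		 * just skipped here rather than being completed
		 * through the normal TX path.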
		 */
		if (ts.ts_queue_id == sc->sc_bhalq)
			continue;

		txq = &sc->sc_txq[ts.ts_queue_id];

		ATH_TXQ_LOCK(txq);
		bf = ATH_TXQ_FIRST(&txq->fifo);

		/*
		 * Work around the situation where I'm seeing notifications
		 * for Q1 when no frames are available.  That needs to be
		 * debugged but not by crashing _here_.
		 */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
			    __func__,
			    ts.ts_queue_id);
			ATH_TXQ_UNLOCK(txq);
			continue;
		}

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
		    __func__,
		    ts.ts_queue_id, bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));

		/* XXX TODO: actually output debugging info about this */

#if 0
		/* XXX assert the buffer/descriptor matches the status descid */
		if (ts.ts_desc_id != bf->bf_descid) {
			device_printf(sc->sc_dev,
			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
			    "bfdescid=%d)\n",
			    __func__,
			    ts.ts_queue_id,
			    ts.ts_desc_id,
			    bf->bf_descid);
		}
#endif

		/* This removes the buffer and decrements the queue depth */
		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;

		/*
		 * If this was the end of a FIFO set, decrement FIFO depth
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND)
			txq->axq_fifo_depth--;

		/*
		 * If this isn't the final buffer in a FIFO set, mark
		 * the buffer as busy so it goes onto the holding queue.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			bf->bf_flags |= ATH_BUF_BUSY;

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    txq->fifo.axq_depth);

		/* XXX assert FIFO depth >= 0 */
		ATH_TXQ_UNLOCK(txq);

		/*
		 * Outside of the TX lock - if the buffer is the end
		 * buffer in this FIFO, we don't need a holding
		 * buffer any longer.
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND) {
			ATH_TXQ_LOCK(txq);
			ath_txq_freeholdingbuf(sc, txq);
			ATH_TXQ_UNLOCK(txq);
		}

		/*
		 * First we need to make sure ts_rate is valid.
		 *
		 * Pre-EDMA chips pass the whole TX descriptor to
		 * the proctxdesc function which will then fill out
		 * ts_rate based on the ts_finaltsi (final TX index)
		 * in the TX descriptor.  However the TX completion
		 * FIFO doesn't have this information.  So here we
		 * do a separate HAL call to populate that information.
		 *
		 * The same problem exists with ts_longretry.
		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
		 * the AR9380 HAL currently doesn't.  So until the HAL
		 * is imported and this can be added, we correct for it
		 * here.
		 */
		/* XXX TODO */
		/* XXX faked for now.  Ew. */
		if (ts.ts_finaltsi < 4) {
			ts.ts_rate =
			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
			switch (ts.ts_finaltsi) {
			case 3:
				ts.ts_longretry +=
				    bf->bf_state.bfs_rc[2].tries;
				/* FALLTHROUGH */
			case 2:
				ts.ts_longretry +=
				    bf->bf_state.bfs_rc[1].tries;
				/* FALLTHROUGH */
			case 1:
				ts.ts_longretry +=
				    bf->bf_state.bfs_rc[0].tries;
			}
		} else {
			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
			    __func__,
			    ts.ts_finaltsi);
			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
		}

		/*
		 * XXX This is terrible.
		 *
		 * Right now, some code uses the TX status that is
		 * passed in here, but the completion handlers in the
		 * software TX path also use bf_status.ds_txstat.
		 * Ew.  That should all go away.
		 *
		 * XXX It's also possible the rate control completion
		 * routine is called twice.
		 */
		memcpy(&bf->bf_status, &ts, sizeof(ts));

		ni = bf->bf_node;

		/* Update RSSI */
		/* XXX duplicate from ath_tx_processq */
		if (ni != NULL && ts.ts_status == 0 &&
		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
			nacked++;
			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
			    ts.ts_rssi);
		}

		/* Handle frame completion and rate control update */
		ath_tx_process_buf_completion(sc, txq, &ts, bf);

		/* bf is invalid at this point */

		/*
		 * Now that there's space in the FIFO, let's push some
		 * more frames into it.
		 */
		ATH_TXQ_LOCK(txq);
		if (dosched)
			ath_edma_tx_fifo_fill(sc, txq);
		ATH_TXQ_UNLOCK(txq);
	}

	sc->sc_wd_timer = 0;

	/* Kick software scheduler */
	/*
	 * XXX It's inefficient to do this if the FIFO queue is full,
	 * but there's no easy way right now to only populate
	 * the txq task for _one_ TXQ.  This should be fixed.
	 */
	if (dosched)
		ath_tx_swq_kick(sc);
}

static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}

void
ath_xmit_setup_edma(struct ath_softc *sc)
{

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);

	if (bootverbose) {
		device_printf(sc->sc_dev, "TX descriptor length: %d\n",
		    sc->sc_tx_desclen);
		device_printf(sc->sc_dev, "TX status length: %d\n",
		    sc->sc_tx_statuslen);
		device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
		    sc->sc_tx_nmaps);
	}

	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}