/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_tx_edma.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

/*
 * XXX doesn't belong here, and should be tunable
 */
#define	ATH_TXSTATUS_RING_SIZE	512

MALLOC_DECLARE(M_ATHDEV);

static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);

#ifdef	ATH_DEBUG_ALQ
static void
ath_tx_alq_edma_push(struct ath_softc *sc, int txq, int nframes,
    int fifo_depth, int frame_cnt)
{
	struct if_ath_alq_tx_fifo_push aq;

	aq.txq = htobe32(txq);
	aq.nframes = htobe32(nframes);
	aq.fifo_depth = htobe32(fifo_depth);
	aq.frame_cnt = htobe32(frame_cnt);

	if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TX_FIFO_PUSH,
	    sizeof(aq),
	    (const char *) &aq);
}
#endif	/* ATH_DEBUG_ALQ */
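
/*
 * Illustrative sketch (editor's note, not part of the original logic):
 * the INCR/DECR helpers above implement power-of-two ring index
 * arithmetic, e.g. for the 512 entry status ring:
 *
 *	int idx = ATH_TXSTATUS_RING_SIZE - 1;
 *	INCR(idx, ATH_TXSTATUS_RING_SIZE);	-> idx wraps to 0
 *	DECR(idx, ATH_TXSTATUS_RING_SIZE);	-> idx is 511 again
 *
 * The mask trick only behaves correctly when the size is a power of
 * two, which ATH_TXSTATUS_RING_SIZE (512) is.
 */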

/*
 * XXX TODO: push an aggregate as a single FIFO slot, even though
 * it may not meet the TXOP for say, DBA-gated traffic in TDMA mode.
 *
 * The TX completion code handles a TX FIFO slot having multiple frames,
 * aggregate or otherwise, but it may just make things easier to deal
 * with.
 *
 * XXX TODO: track the number of aggregate subframes and put that in the
 * push alq message.
 */
static void
ath_tx_edma_push_staging_list(struct ath_softc *sc, struct ath_txq *txq,
    int limit)
{
	struct ath_buf *bf, *bf_last;
	struct ath_buf *bfi, *bfp;
	int i, sqdepth;
	TAILQ_HEAD(axq_q_f_s, ath_buf) sq;

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Don't bother doing any work if it's full.
	 */
	if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
		return;

	if (TAILQ_EMPTY(&txq->axq_q))
		return;

	TAILQ_INIT(&sq);

	/*
	 * First pass - walk sq, queue up to 'limit' entries,
	 * subtract them from the staging queue.
	 */
	sqdepth = 0;
	for (i = 0; i < limit; i++) {
		/* Grab the head entry */
		bf = ATH_TXQ_FIRST(txq);
		if (bf == NULL)
			break;
		ATH_TXQ_REMOVE(txq, bf, bf_list);

		/* Queue it into our staging list */
		TAILQ_INSERT_TAIL(&sq, bf, bf_list);

		/* Ensure the flags are cleared */
		bf->bf_flags &= ~(ATH_BUF_FIFOPTR | ATH_BUF_FIFOEND);
		sqdepth++;
	}

	/*
	 * Ok, so now we have a staging list of up to 'limit'
	 * frames from the txq.  Now let's wrap that up
	 * into its own list and pass that to the hardware
	 * as one FIFO entry.
	 */

	bf = TAILQ_FIRST(&sq);
	bf_last = TAILQ_LAST(&sq, axq_q_s);

	/*
	 * Ok, so here's the gymnastics required to make this
	 * all sensible.
	 */

	/*
	 * Tag the first/last buffer appropriately.
	 */
	bf->bf_flags |= ATH_BUF_FIFOPTR;
	bf_last->bf_flags |= ATH_BUF_FIFOEND;

	/*
	 * Walk the descriptor list and link them appropriately.
	 */
	bfp = NULL;
	TAILQ_FOREACH(bfi, &sq, bf_list) {
		if (bfp != NULL) {
			ath_hal_settxdesclink(sc->sc_ah, bfp->bf_lastds,
			    bfi->bf_daddr);
		}
		bfp = bfi;
	}

	i = 0;
	TAILQ_FOREACH(bfi, &sq, bf_list) {
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bfi, txq->axq_qnum, i, 0);
#endif	/* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bfi);
#endif	/* ATH_DEBUG_ALQ */
		i++;
	}

	/*
	 * We now need to push this set of frames onto the tail
	 * of the FIFO queue.  We don't adjust the aggregate
	 * count, only the queue depth counter(s).
	 * We also need to blank the link pointer now.
	 */

	TAILQ_CONCAT(&txq->fifo.axq_q, &sq, bf_list);
	/* Bump total queue tracking in FIFO queue */
	txq->fifo.axq_depth += sqdepth;

	/* Bump FIFO queue */
	txq->axq_fifo_depth++;
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: queued %d packets; depth=%d, fifo depth=%d\n",
	    __func__, sqdepth, txq->fifo.axq_depth, txq->axq_fifo_depth);

	/* Push the first entry into the hardware */
	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);

	/* Push start on the DMA if it's not already started */
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

#ifdef	ATH_DEBUG_ALQ
	ath_tx_alq_edma_push(sc, txq->axq_qnum, sqdepth,
	    txq->axq_fifo_depth,
	    txq->fifo.axq_depth);
#endif	/* ATH_DEBUG_ALQ */
}
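
/*
 * Illustrative sketch of the FIFO slot built above: given a staging
 * list of four frames, ath_tx_edma_push_staging_list() produces
 *
 *	bf(ATH_BUF_FIFOPTR) -> bf -> bf -> bf_last(ATH_BUF_FIFOEND)
 *
 * chained together via ath_hal_settxdesclink().  Only the head
 * bf_daddr is handed to ath_hal_puttxbuf(); the completion path later
 * walks the fifo.axq_q list until it sees ATH_BUF_FIFOEND, which is
 * what makes the first/last tagging load-bearing.
 */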

#define	TX_BATCH_SIZE	32

/*
 * Push some frames into the TX FIFO if we have space.
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{

	ATH_TXQ_LOCK_ASSERT(txq);

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	/*
	 * For now, push up to 4 frames per TX FIFO slot.
	 * If more are in the hardware queue then they'll
	 * get populated when we try to send another frame
	 * or complete a frame - so at most there'll be
	 * 32 non-AMPDU frames per TXQ.
	 *
	 * Note that the hardware staging queue will limit
	 * how many frames in total we will have pushed into
	 * here.
	 *
	 * Later on, we'll want to push fewer frames into
	 * the TX FIFO since we don't want to necessarily
	 * fill tens or hundreds of milliseconds of potential
	 * frames.
	 *
	 * However, we need more frames right now because of
	 * how the MAC implements the frame scheduling policy.
	 * It only ungates a single FIFO entry at a time,
	 * and will run that until CHNTIME expires or the
	 * end of that FIFO entry descriptor list is reached.
	 * So for TDMA we suffer a big performance penalty -
	 * single TX FIFO entries mean the MAC only sends out
	 * one frame per DBA event, which turned out to average
	 * around 6ms per TX frame.
	 *
	 * So, for aggregates it's okay - it'll push two at a
	 * time and this will just do them more efficiently.
	 * For non-aggregates it'll do 4 at a time, up to the
	 * non-aggr limit (non_aggr, which is 32.)  They should
	 * be time-based rather than a hard count, but I also
	 * do need sleep.
	 */

	/*
	 * Do some basic, basic batching to the hardware
	 * queue.
	 *
	 * If we have TX_BATCH_SIZE entries in the staging
	 * queue, then let's try to send them all in one hit.
	 *
	 * Ensure we don't push more than TX_BATCH_SIZE worth
	 * in, otherwise we end up draining 8 slots worth of
	 * 32 frames into the hardware queue and then we don't
	 * attempt to push more frames in until we empty the
	 * FIFO.
	 */
	if (txq->axq_depth >= TX_BATCH_SIZE / 2 &&
	    txq->fifo.axq_depth <= TX_BATCH_SIZE) {
		ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE);
	}

	/*
	 * Aggregate check: if we have less than two FIFO slots
	 * busy and we have some aggregate frames, queue them.
	 *
	 * Now, ideally we'd just check to see if the scheduler
	 * has given us aggregate frames and push them into the FIFO
	 * as individual slots, as honestly we should just be pushing
	 * a single aggregate in as one FIFO slot.
	 *
	 * Let's do that next once I know this works.
	 */
	else if (txq->axq_aggr_depth > 0 && txq->axq_fifo_depth < 2)
		ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE);

	/*
	 * If we have fewer frames and the TX FIFO isn't empty,
	 * wait until the hardware has finished sending the FIFO
	 * contents.
	 *
	 * If we have fewer frames and the TX FIFO is empty, then
	 * send them.
	 */
	else if (txq->axq_fifo_depth == 0) {
		ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE);
	}
}
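
/*
 * Summary of the scheduling policy above (illustrative restatement of
 * the three branches):
 *
 *	staging depth >= TX_BATCH_SIZE/2 and FIFO list <= TX_BATCH_SIZE
 *	    -> push a batch
 *	aggregates queued and fewer than 2 FIFO slots busy
 *	    -> push a batch
 *	TX FIFO empty
 *	    -> push a batch
 *	otherwise
 *	    -> wait for a completion to drain the FIFO first
 */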

/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;
	int i = 0;
	int fifostart = 1;
	int old_fifo_depth;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Let's log if the tracked FIFO depth doesn't match
	 * what we actually push in.
	 */
	old_fifo_depth = txq->axq_fifo_depth;
	txq->axq_fifo_depth = 0;

	/*
	 * Walk the FIFO staging list, looking for "head" entries.
	 * Since we may have a partially completed list of frames,
	 * we push the first frame we see into the FIFO and re-mark
	 * it as the head entry.  We then skip entries until we see
	 * FIFO end, at which point we get ready to push another
	 * entry into the FIFO.
	 */
	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
		/*
		 * If we're looking for FIFOEND and we haven't found
		 * it, skip.
		 *
		 * If we're looking for FIFOEND and we've found it,
		 * reset for another descriptor.
		 */
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif	/* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */

		if (fifostart == 0) {
			if (bf->bf_flags & ATH_BUF_FIFOEND)
				fifostart = 1;
			continue;
		}

		/* Make sure we're not overflowing the FIFO! */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
			    __func__,
			    txq->axq_qnum,
			    txq->axq_fifo_depth);
		}

#if 0
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

		/*
		 * Set this to be the first buffer in the FIFO
		 * list - even if it's also the last buffer in
		 * a FIFO list!
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;

		/* Push it into the FIFO and bump the FIFO count */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
		txq->axq_fifo_depth++;

		/*
		 * If this isn't the last entry either, let's
		 * clear fifostart so we continue looking for
		 * said last entry.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			fifostart = 0;
		i++;
	}

	/* Only bother starting the queue if there's something in it */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
	    __func__,
	    txq->axq_qnum,
	    old_fifo_depth,
	    txq->axq_fifo_depth);

	/* And now, let's check! */
	if (txq->axq_fifo_depth != old_fifo_depth) {
		device_printf(sc->sc_dev,
		    "%s: Q%d: FIFO depth should be %d, is %d\n",
		    __func__,
		    txq->axq_qnum,
		    old_fifo_depth,
		    txq->axq_fifo_depth);
	}
}
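
/*
 * Worked example of the restart walk above (illustrative): if the
 * fifo.axq_q list holds two previously pushed slots,
 *
 *	[ A(PTR) B C(END) ] [ D(PTR) E(END) ]
 *
 * the walk re-pushes A and D via ath_hal_puttxbuf() (re-tagging them
 * ATH_BUF_FIFOPTR) and skips B, C and E, leaving axq_fifo_depth at 2 -
 * matching old_fifo_depth if the tracking was consistent.
 */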

/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TXQ_LOCK(txq);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * XXX TODO: write a hard-coded check to ensure that
	 * the queue id in the TX descriptor matches txq->axq_qnum.
	 */

	/* Update aggr stats */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/* Push and update frame stats */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

	/*
	 * Finally, call the FIFO schedule routine to schedule some
	 * frames to the FIFO.
	 */
	ath_edma_tx_fifo_fill(sc, txq);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	/*
	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

		/* re-sync buffer to memory */
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif	/* ATH_DEBUG_ALQ */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Handoff this frame to the hardware.
 *
 * For the multicast queue, this will treat it as a software queue
 * and append it to the list, after updating the MORE_DATA flag
 * in the previous frame.  The cabq processing code will ensure
 * that the queue contents get transferred over.
 *
 * For the hardware queues, this will queue a frame to the queue
 * like before, then populate the FIFO from that.  Since the
 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
 * frames such as management frames don't get prematurely dropped.
 *
 * This does imply that a similar flush-hwq-to-fifoq method will
 * need to be called from the processq function, before the
 * per-node software scheduler is called.
 */
static void
ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
	    __func__,
	    bf,
	    txq,
	    txq->axq_qnum);

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_edma_xmit_handoff_mcast(sc, txq, bf);
	else
		ath_edma_xmit_handoff_hw(sc, txq, bf);
}
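
/*
 * Usage sketch (illustrative): callers don't invoke the handoff
 * routines directly; the TX path goes through the method table
 * populated in ath_xmit_setup_edma() below, e.g.
 *
 *	sc->sc_tx.xmit_handoff(sc, txq, bf);
 *
 * which lands in ath_edma_xmit_handoff() and dispatches to the
 * multicast or hardware variant based on the queue number.
 */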

static int
ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	te->m_fifo = malloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (te->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];

	/* XXX TODO: actually deref the ath_buf entries? */
	free(te->m_fifo, M_ATHDEV);
	return (0);
}

static int
ath_edma_dma_txsetup(struct ath_softc *sc)
{
	int error;
	int i;

	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
	if (error != 0)
		return (error);

	ath_hal_setuptxstatusring(sc->sc_ah,
	    (void *) sc->sc_txsdma.dd_desc,
	    sc->sc_txsdma.dd_desc_paddr,
	    ATH_TXSTATUS_RING_SIZE);

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_setup_txfifo(sc, i);
	}

	return (0);
}

static int
ath_edma_dma_txteardown(struct ath_softc *sc)
{
	int i;

	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
		ath_edma_free_txfifo(sc, i);
	}

	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
	return (0);
}

/*
 * Drain all TXQs, potentially after completing the existing completed
 * frames.
 */
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	(void) ath_stoptxdma(sc);

	/*
	 * If reset type is noloss, the TX FIFO needs to be serviced
	 * and those frames need to be handled.
	 *
	 * Otherwise, just toss everything in each TX queue.
	 */
	if (reset_type == ATH_RESET_NOLOSS) {
		ath_edma_tx_processq(sc, 0);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				/*
				 * Free the holding buffer; DMA is now
				 * stopped.
				 */
				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
				/*
				 * Reset the link pointer to NULL; there's
				 * no frames to chain DMA to.
				 */
				sc->sc_txq[i].axq_link = NULL;
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	} else {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}

	/* XXX dump out the TX completion FIFO contents */

	/* XXX dump out the frames */

	sc->sc_wd_timer = 0;
}

/*
 * TX completion tasklet.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;

#if 0
	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
	    __func__, npending);
#endif
	ath_edma_tx_processq(sc, 1);
}
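
/*
 * Completion flow (illustrative recap of the code below):
 *
 *	ath_edma_tx_proc()			taskqueue entry point
 *	  -> ath_edma_tx_processq(sc, 1)	pop TX status FIFO,
 *						complete each ath_buf
 *	       -> ath_edma_tx_fifo_fill()	refill the FIFO per TXQ
 *	       -> ath_tx_swq_kick()		kick software scheduler
 */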

/*
 * Process the TX status queue.
 */
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	struct ath_tx_status ts;
	struct ath_txq *txq;
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int nacked = 0;
	int idx;
	int i;

#ifdef	ATH_DEBUG
	/* XXX */
	uint32_t txstatus[32];
#endif

	for (idx = 0; ; idx++) {
		bzero(&ts, sizeof(ts));

		ATH_TXSTATUS_LOCK(sc);
#ifdef	ATH_DEBUG
		ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
		ATH_TXSTATUS_UNLOCK(sc);

		if (status == HAL_EINPROGRESS)
			break;

#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
			if (ts.ts_queue_id != sc->sc_bhalq)
				ath_printtxstatbuf(sc, NULL, txstatus,
				    ts.ts_queue_id, idx, (status == HAL_OK));
#endif

		/*
		 * If there is an error with this descriptor, stop
		 * processing for now.
		 *
		 * XXX TBD: log some statistics?
		 */
		if (status == HAL_EIO) {
			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
			    __func__);
			break;
		}

#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
			    sc->sc_tx_statuslen,
			    (char *) txstatus);
#endif	/* ATH_DEBUG_ALQ */

		/*
		 * At this point we have a valid status descriptor.
		 * The QID and descriptor ID (which currently isn't set)
		 * are part of the status.
		 *
		 * We then assume that the descriptor in question is the
		 * -head- of the given QID.  Eventually we should verify
		 * this by using the descriptor ID.
		 */

		/*
		 * The beacon queue is not currently a "real" queue.
		 * Frames aren't pushed onto it and the lock isn't set up.
		 * So skip it for now; the beacon handling code will
		 * free and alloc more beacon buffers as appropriate.
		 */
		if (ts.ts_queue_id == sc->sc_bhalq)
			continue;

		txq = &sc->sc_txq[ts.ts_queue_id];

		ATH_TXQ_LOCK(txq);
		bf = ATH_TXQ_FIRST(&txq->fifo);

		/*
		 * Work around the situation where I'm seeing notifications
		 * for Q1 when no frames are available.  That needs to be
		 * debugged but not by crashing _here_.
		 */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
			    __func__,
			    ts.ts_queue_id);
			ATH_TXQ_UNLOCK(txq);
			continue;
		}

		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d, bf=%p, start=%d, end=%d\n",
		    __func__,
		    ts.ts_queue_id, bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));

		/* XXX TODO: actually output debugging info about this */

#if 0
		/* XXX assert the buffer/descriptor matches the status descid */
		if (ts.ts_desc_id != bf->bf_descid) {
			device_printf(sc->sc_dev,
			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
			    "bfdescid=%d)\n",
			    __func__,
			    ts.ts_queue_id,
			    ts.ts_desc_id,
			    bf->bf_descid);
		}
#endif

		/* This removes the buffer and decrements the queue depth */
		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;

		/*
		 * If this was the end of a FIFO set, decrement FIFO depth
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND)
			txq->axq_fifo_depth--;

		/*
		 * If this isn't the final buffer in a FIFO set, mark
		 * the buffer as busy so it goes onto the holding queue.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			bf->bf_flags |= ATH_BUF_BUSY;

		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d: FIFO depth is now %d (%d)\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    txq->fifo.axq_depth);

		/* XXX assert FIFO depth >= 0 */
		ATH_TXQ_UNLOCK(txq);

		/*
		 * Outside of the TX lock - if the buffer is the end
		 * buffer in this FIFO, we don't need a holding
		 * buffer any longer.
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND) {
			ATH_TXQ_LOCK(txq);
			ath_txq_freeholdingbuf(sc, txq);
			ATH_TXQ_UNLOCK(txq);
		}

		/*
		 * First we need to make sure ts_rate is valid.
		 *
		 * Pre-EDMA chips pass the whole TX descriptor to
		 * the proctxdesc function which will then fill out
		 * ts_rate based on the ts_finaltsi (final TX index)
		 * in the TX descriptor.  However the TX completion
		 * FIFO doesn't have this information.  So here we
		 * do a separate HAL call to populate that information.
		 *
		 * The same problem exists with ts_longretry.
		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
		 * the AR9380 HAL currently doesn't.  So until the HAL
		 * is imported and this can be added, we correct for it
		 * here.
		 */
		/* XXX TODO */
		/* XXX faked for now.  Ew. */
		if (ts.ts_finaltsi < 4) {
			ts.ts_rate =
			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
			switch (ts.ts_finaltsi) {
			case 3: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[2].tries;
				/* FALLTHROUGH */
			case 2: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[1].tries;
				/* FALLTHROUGH */
			case 1: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[0].tries;
			}
		} else {
			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
			    __func__,
			    ts.ts_finaltsi);
			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
		}
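
		/*
		 * Worked example of the retry accumulation above
		 * (illustrative): with a rate series of tries =
		 * {2, 2, 2, 1} and ts_finaltsi == 3, the fallthrough
		 * adds bfs_rc[2].tries + bfs_rc[1].tries +
		 * bfs_rc[0].tries (2 + 2 + 2 = 6) on top of whatever
		 * ts_longretry the status already reported for the
		 * final series.
		 */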

		/*
		 * XXX This is terrible.
		 *
		 * Right now, some code uses the TX status that is
		 * passed in here, but the completion handlers in the
		 * software TX path also use bf_status.ds_txstat.
		 * Ew.  That should all go away.
		 *
		 * XXX It's also possible the rate control completion
		 * routine is called twice.
		 */
		memcpy(&bf->bf_status, &ts, sizeof(ts));

		ni = bf->bf_node;

		/* Update RSSI */
		/* XXX duplicate from ath_tx_processq */
		if (ni != NULL && ts.ts_status == 0 &&
		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
			nacked++;
			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
			    ts.ts_rssi);
		}

		/* Handle frame completion and rate control update */
		ath_tx_process_buf_completion(sc, txq, &ts, bf);

		/* NB: bf is invalid at this point */
	}

	sc->sc_wd_timer = 0;

	/*
	 * XXX It's inefficient to do this if the FIFO queue is full,
	 * but there's no easy way right now to only populate
	 * the txq task for _one_ TXQ.  This should be fixed.
	 */
	if (dosched) {
		/* Attempt to schedule more hardware frames to the TX FIFO */
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_edma_tx_fifo_fill(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}

		/* Kick software scheduler */
		ath_tx_swq_kick(sc);
	}
}

static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}

void
ath_xmit_setup_edma(struct ath_softc *sc)
{

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);

	if (bootverbose) {
		device_printf(sc->sc_dev, "TX descriptor length: %d\n",
		    sc->sc_tx_desclen);
		device_printf(sc->sc_dev, "TX status length: %d\n",
		    sc->sc_tx_statuslen);
		device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
		    sc->sc_tx_nmaps);
	}

	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}
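
/*
 * Note (editor's assumption, not verified in this file): the attach
 * path is expected to install these methods in place of the legacy
 * ath_xmit_setup_legacy() variants when the chip is EDMA-capable
 * (sc->sc_isedma), so all TX entry points above are reached via the
 * sc_tx method table.
 */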