/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * Some general ring-index macros.  The mask form requires the ring
 * size to be a power of two; the bodies are wrapped in do/while so
 * each macro behaves as a single statement.
 */
#define	INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
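
/*
 * Illustrative example (not from the original source): with a ring
 * size of 8, INCR(i, 8) computes i = (i + 1) & 7, so an index at
 * slot 7 wraps back to slot 0 rather than running off the end of
 * the m_fifo[] array.
 */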

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA; only mark receive as stopped if the
	 * hardware confirmed the DMA engine actually halted.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}
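
/*
 * Illustrative note (not from the original source): entries are
 * consumed at m_fifo_head and appended at m_fifo_tail, so walking
 * m_fifo_depth entries from the head must land exactly on the tail.
 * For example, with head=6, depth=4 and an 8-entry ring, the walk
 * visits slots 6, 7, 0, 1 and finishes at i=2, which must equal
 * m_fifo_tail; the check above complains if it doesn't.
 */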

/*
 * Start receive.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip?\n",
		    __func__);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary?  We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);

	return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
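
/*
 * Illustrative note (not from the original source): receive is split
 * into two stages.  ath_edma_recv_proc_queue() drains completed
 * descriptors from the hardware FIFO onto sc_rx_rxlist[] under
 * ATH_RX_LOCK; ath_edma_recv_proc_deferred_queue() then walks that
 * list and hands each frame to ath_rx_pkt() without holding the RX
 * lock across the net80211 stack.
 */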

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue.
	 *
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;	/* noise floor is a signed dBm value */
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1!  Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);
}
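
/*
 * Illustrative note (not from the original source): with EDMA RX the
 * hardware DMAs the completion status words into the head of the
 * buffer itself; ath_hal_rxprocdesc() reports HAL_EINPROGRESS until
 * those words have been written, so the drain loop above stops at the
 * first still-pending slot and leaves it at m_fifo_head for the next
 * pass.
 */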

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed?  The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}
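
/*
 * Illustrative note (not from the original source): sc_rxproc_cnt is
 * manipulated under ATH_PCU_LOCK so a concurrent reset can tell that
 * RX processing is in flight; conversely, the tasklet below bails out
 * early when sc_inreset_cnt shows a reset is pending.
 */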

static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef	IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);	/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}
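
/*
 * Illustrative note (not from the original source): once the hardware
 * completes a buffer initialised above, its layout is:
 *
 *   [ sc_rx_statuslen bytes of RX status ][ 802.11 frame payload ]
 *
 * which is why ath_edma_recv_proc_deferred_queue() does
 * m_adj(bf->bf_m, sc->sc_rx_statuslen) before handing the mbuf to
 * ath_rx_pkt().
 */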

/*
 * Allocate a RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'nbufs' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO slot is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}
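
/*
 * Illustrative note (not from the original source): callers typically
 * pass nbufs = m_fifolen, so the loop above simply tops the ring up
 * to however many free slots remain (m_fifolen - m_fifo_depth); a
 * partial refill due to mbuf exhaustion is reported via the return
 * value.
 */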

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif /* ATH_DEBUG */
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
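
/*
 * Illustrative note (not from the original source):
 * ath_recv_setup_edma() plugs the EDMA-specific handlers into the
 * sc_rx method table; the common driver code then drives RX
 * start/stop/flush/tasklet through these pointers rather than calling
 * the legacy RX path directly.
 */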