1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 14 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 15 * redistribution must be conditioned upon including a substantially 16 * similar Disclaimer requirement for further binary redistribution. 17 * 18 * NO WARRANTY 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 22 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 23 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 24 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 27 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 29 * THE POSSIBILITY OF SUCH DAMAGES. 30 */ 31 32 #include <sys/cdefs.h> 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49 #include "opt_ah.h" 50 #include "opt_wlan.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/sysctl.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/lock.h> 58 #include <sys/mutex.h> 59 #include <sys/kernel.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/errno.h> 63 #include <sys/callout.h> 64 #include <sys/bus.h> 65 #include <sys/endian.h> 66 #include <sys/kthread.h> 67 #include <sys/taskqueue.h> 68 #include <sys/priv.h> 69 #include <sys/module.h> 70 #include <sys/ktr.h> 71 #include <sys/smp.h> /* for mp_ncpus */ 72 73 #include <machine/bus.h> 74 75 #include <net/if.h> 76 #include <net/if_var.h> 77 #include <net/if_dl.h> 78 #include <net/if_media.h> 79 #include <net/if_types.h> 80 #include <net/if_arp.h> 81 #include <net/ethernet.h> 82 #include <net/if_llc.h> 83 84 #include <net80211/ieee80211_var.h> 85 #include <net80211/ieee80211_regdomain.h> 86 #ifdef IEEE80211_SUPPORT_SUPERG 87 #include <net80211/ieee80211_superg.h> 88 #endif 89 #ifdef IEEE80211_SUPPORT_TDMA 90 #include <net80211/ieee80211_tdma.h> 91 #endif 92 93 #include <net/bpf.h> 94 95 #ifdef INET 96 #include <netinet/in.h> 97 #include <netinet/if_ether.h> 98 #endif 99 100 #include <dev/ath/if_athvar.h> 101 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 102 #include <dev/ath/ath_hal/ah_diagcodes.h> 103 104 #include <dev/ath/if_ath_debug.h> 105 #include <dev/ath/if_ath_misc.h> 106 #include <dev/ath/if_ath_tsf.h> 107 #include <dev/ath/if_ath_tx.h> 108 #include <dev/ath/if_ath_sysctl.h> 109 #include <dev/ath/if_ath_led.h> 110 #include <dev/ath/if_ath_keycache.h> 111 #include <dev/ath/if_ath_rx.h> 112 #include <dev/ath/if_ath_beacon.h> 113 #include <dev/ath/if_athdfs.h> 114 #include <dev/ath/if_ath_descdma.h> 115 116 #ifdef ATH_TX99_DIAG 117 #include <dev/ath/ath_tx99/ath_tx99.h> 118 #endif 119 120 #include 
<dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 *
 * NOTE(review): these wrap the index with a bitmask, so they are only
 * correct when (_sz) is a power of two; they also expand to two
 * statements, so they must not be the sole body of an un-braced if.
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

/*
 * Stop receive.
 *
 * Disable the PCU, clear the RX filter, stop RX DMA, then free any
 * partially-reassembled ("pending") mbufs on both EDMA RX queues.
 *
 * NOTE(review): the 'dodelay' argument is only logged; the DELAY(3000)
 * below is performed unconditionally.
 */
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called, dodelay=%d\n",
	    __func__, dodelay);

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);

	/*
	 * Stop RX DMA; only mark RX as stopped if the hardware
	 * reports the DMA engine actually halted.
	 */
	if (ath_hal_stopdmarecv(ah) == AH_TRUE)
		sc->sc_rx_stopped = 1;

	/*
	 * Give the various bus FIFOs (not EDMA descriptor FIFO)
	 * time to finish flushing out data.
	 */
	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called\n", __func__);

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		/* Re-push this entry's physical address to the HW FIFO */
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Start receive.
 *
 * Re-enable the RX FIFO, re-push any software-tracked FIFO entries
 * back to the hardware (when RX was previously reset or stopped),
 * top up both RX queues with fresh buffers, then re-enable the PCU.
 *
 * Always returns 0; failures from the FIFO top-up calls are not
 * propagated.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX,
	    "%s: called; resetted=%d, stopped=%d\n", __func__,
	    sc->sc_rx_resetted, sc->sc_rx_stopped);

	ATH_RX_LOCK(sc);

	/*
	 * Sanity check - are we being called whilst RX
	 * isn't stopped?  If so, we may end up pushing
	 * too many entries into the RX FIFO and
	 * badness occurs.
	 */

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * In theory the hardware has been initialised, right?
	 */
	if (sc->sc_rx_resetted == 1 || sc->sc_rx_stopped == 1) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rx_resetted = 0;
	} else {
		device_printf(sc->sc_dev,
		    "%s: called without resetting chip? "
		    "resetted=%d, stopped=%d\n",
		    __func__,
		    sc->sc_rx_resetted,
		    sc->sc_rx_stopped);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 *
	 * XXX TODO: is this really necessary?  We should've stopped
	 * the hardware already and reinitialised it, so it's a no-op.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah, (!! sc->sc_scanning));

	/*
	 * We're now doing RX DMA!
	 */
	sc->sc_rx_stopped = 0;

	ATH_RX_UNLOCK(sc);
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: ready\n", __func__);

	return (0);
}

/*
 * Drain one hardware RX queue into the deferred list and kick the
 * RX taskqueue to do the actual frame processing.
 *
 * Wakes the chip around the FIFO drain, then restores the previous
 * power state.
 */
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; qtype=%d, dosched=%d\n",
	    __func__, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX TODO: methodize */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * As above, but drain both the HP and LP RX queues before kicking
 * the RX taskqueue.
 */
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; dosched=%d\n",
	    __func__, dosched);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX TODO: methodize */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Synchronously flush RX: cancel/drain the RX task, drain both
 * hardware queues into the deferred lists, then process the deferred
 * lists in-line (dosched=0, so no fresh buffers are scheduled).
 */
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV | ATH_DEBUG_EDMA_RX, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	// XXX TODO: methodize; make it an RX stop/block
	while (taskqueue_cancel(sc->sc_tq, &sc->sc_rxtask, NULL) != 0) {
		taskqueue_drain(sc->sc_tq, &sc->sc_rxtask);
	}

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RECV | ATH_DEBUG_EDMA_RX, "%s: done\n", __func__);
}

/*
 * Process frames from the current queue into the deferred queue.
 *
 * Walks the software FIFO from the head; each descriptor the hardware
 * has completed (ath_hal_rxprocdesc() != HAL_EINPROGRESS) is synced,
 * unmapped and moved onto sc_rx_rxlist[qtype] for the deferred path.
 * If 'dosched' is set, fresh buffers are allocated to refill the FIFO.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; qtype=%d, dosched=%d\n", __func__, qtype, dosched);

	ATH_RX_LOCK(sc);

#if 1
	if (sc->sc_rx_resetted == 1) {
		/*
		 * XXX We shouldn't ever be scheduled if
		 * receive has been stopped - so complain
		 * loudly!
		 */
		device_printf(sc->sc_dev,
		    "%s: sc_rx_resetted=1! Bad!\n",
		    __func__);
		ATH_RX_UNLOCK(sc);
		return;
	}
#endif

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		/* NOTE(review): 'm' is set but not used in this loop */
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

/*
 * Process the deferred (completed) list for the given RX queue.
 *
 * Moves the completed list out from under the RX lock in one go,
 * then hands each frame to ath_rx_pkt() inside a net epoch section.
 * Returns the number of frames ath_rx_pkt() reported as good.
 */
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;
	struct epoch_tracker et;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	NET_EPOCH_ENTER(et);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		/* ath_rx_pkt() consumes the mbuf; clear our reference */
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}
	NET_EPOCH_EXIT(et);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

/*
 * RX taskqueue handler.
 *
 * Processes the deferred lists for both RX queues, ages superg
 * fast-frames and kicks the DFS task if radar processing is needed.
 * Bails out early (without processing) when a chip reset is in
 * progress (sc_inreset_cnt > 0).
 */
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef	IEEE80211_SUPPORT_SUPERG
	ieee80211_ff_age_all(ic, 100);
#endif
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; done!\n", __func__);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 *
 * Returns 0 on success; ENOBUFS or a bus_dma error code on failure,
 * in which case the mbuf is freed and the ath_buf left unmapped.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 *
	 * The RX status descriptor lives at the front of the mbuf
	 * data area, hence bf_desc pointing at mtod().
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 *
 * Takes a free ath_buf from sc_rxbuf and attaches a freshly
 * DMA-mapped mbuf to it.  Returns NULL when the free list is empty
 * or mbuf setup fails; in the latter case the ath_buf is returned
 * to the free list.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

/*
 * Return an ath_buf to the free list, unloading and freeing its
 * mbuf first if one is still attached.
 */
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_EDMA_RX,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every buffer tracked in the given software FIFO, plus any
 * pending reassembly mbuf, then reset the FIFO indices to empty.
 *
 * Always returns 0.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth and allocates a
 * matching software shadow array.  Returns 0 on success or a
 * negative errno on failure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the software FIFO shadow array for the given queue.
 * Always returns 0.
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Create the RX descriptor DMA state, then set up the HP and LP
 * software FIFO structures.
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	/* NOTE(review): failures from the FIFO setup calls are ignored */
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down RX DMA state: flush the deferred lists and both FIFOs,
 * free the FIFO shadow arrays, then release the descriptor DMA
 * memory (if it was allocated).
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Hook the EDMA RX methods into the driver's RX method table and
 * configure the RX buffer/status sizes with the hardware.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}