/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define	INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)
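/*
 * Note: INCR()/DECR() implement ring index wrap by masking, which is
 * only correct when _sz is a power of two - e.g. INCR(i, 8) takes i
 * from 7 back to 0, but a non-power-of-two size would not wrap
 * correctly.  The HAL-reported RX FIFO depths used as _sz below are
 * assumed to satisfy this.
 */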
MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static int	ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
		    int nbufs);
static int	ath_edma_rxfifo_flush(struct ath_softc *sc,
		    HAL_RX_QUEUE qtype);
static void	ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void	ath_edma_recv_proc_queue(struct ath_softc *sc,
		    HAL_RX_QUEUE qtype, int dosched);
static int	ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
		    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);
	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);

	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}
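/*
 * Worked example of the walk above, assuming a power-of-two FIFO:
 * with m_fifolen = 8, m_fifo_head = 6 and m_fifo_depth = 4, the loop
 * re-pushes the entries at slots 6, 7, 0 and 1, and the final sanity
 * check expects i == m_fifo_tail == 2.
 */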
/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, qtype, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}
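/*
 * Note on the dosched=0 calls above: passing dosched=0 drains any
 * completed FIFO entries onto the deferred lists and processes them,
 * but skips both the FIFO refill and the kickpcu handling in
 * ath_edma_recv_proc_queue(), since a flush caller is typically about
 * to reset or stop the hardware.
 */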
/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		if (npkts > 0)
			device_printf(sc->sc_dev,
			    "%s: handled npkts %d\n",
			    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return;
}
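/*
 * A note on the completion model above: for EDMA chips the RX status
 * is DMAed by the hardware into the head of each RX buffer itself
 * (the first sc_rx_statuslen bytes - see ath_edma_rxbuf_init()), so
 * ath_hal_rxprocdesc() is effectively polling that in-buffer status.
 * The loop stops at the first entry still reporting HAL_EINPROGRESS,
 * which marks the hardware's current position in the FIFO ring.
 */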
/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}
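/*
 * The deferred queue handling above deliberately splits work around
 * ATH_RX_LOCK: the completed list is snapshotted onto a local list
 * under the lock (TAILQ_CONCAT), ath_rx_pkt() - which calls up into
 * net80211 - runs without the RX lock held, and the ath_buf entries
 * are then returned to sc_rxbuf under the lock again.
 */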
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef	IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);	/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}
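/*
 * Resulting RX buffer layout from ath_edma_rxbuf_init():
 *
 *	0		sc_rx_statuslen			sc_edma_bufsize
 *	+---------------+-------------------------------+
 *	| RX status	| received frame data		|
 *	| (zeroed here,	| (hardware is told the buffer	|
 *	| DMAed by hw)	| is bufsize - statuslen bytes)	|
 *	+---------------+-------------------------------+
 *
 * This is why ath_edma_recv_proc_deferred_queue() m_adj()s
 * sc_rx_statuslen bytes off the front of the mbuf before handing it
 * to ath_rx_pkt().
 */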
/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}

	if (bootverbose)
		device_printf(sc->sc_dev,
		    "%s: type=%d, FIFO depth = %d entries\n",
		    __func__,
		    qtype,
		    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}
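/*
 * XXX note that ath_edma_setup_rxfifo() returns negative errno values
 * (-EINVAL/-ENOMEM), unlike the positive errno convention used
 * elsewhere in this file, and its only caller below ignores the
 * return value; a failed FIFO setup is currently only visible via
 * the device_printf() output above.
 */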
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	if (bootverbose) {
		device_printf(sc->sc_dev, "RX status length: %d\n",
		    sc->sc_rx_statuslen);
		device_printf(sc->sc_dev, "RX buffer size: %d\n",
		    sc->sc_edma_bufsize);
	}

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}