/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * Some general macros.  The FIFO sizes must be powers of two
 * for the mask arithmetic here to be correct; wrap the bodies
 * in do/while so the macros behave as single statements.
 */
#define	INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
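
/*
 * Overview of the EDMA RX path:
 *
 * The EDMA-capable chips (eg the AR93xx series) expose two
 * hardware RX FIFOs - high priority (HAL_RX_QUEUE_HP) and low
 * priority (HAL_RX_QUEUE_LP.)  Each FIFO is kept topped up with
 * 4KB buffers whose first sc_rx_statuslen bytes double as the
 * in-buffer RX status descriptor.
 *
 * ath_edma_recv_proc_queue() drains completed buffers from a
 * FIFO onto the per-queue deferred list (sc_rx_rxlist[]);
 * ath_edma_recv_proc_deferred_queue() then hands each frame to
 * ath_rx_pkt() without the RX lock held.
 */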
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);
	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);

	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%d, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
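	/*
	 * Re-push the software FIFO state for any queue whose RX
	 * buffer pointer reads back as zero, ie no frames have been
	 * handled there yet (see the caveats above.)
	 */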
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ath_edma_recv_proc_queue(sc, qtype, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
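		/*
		 * POSTREAD makes the status words the MAC DMAed into
		 * the buffer visible to the CPU; POSTWRITE completes
		 * the PREWRITE of the zeroed in-buffer descriptor done
		 * in ath_edma_rxbuf_init().
		 */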
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		if (npkts > 0)
			device_printf(sc->sc_dev,
			    "%s: handled npkts %d\n",
			    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf, *next;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	TAILQ_FOREACH_SAFE(bf,
	    &sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	TAILQ_FOREACH_SAFE(bf,
	    &sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}

	/*
	 * ath_edma_rxbuf_free() re-links each buf onto sc_rxbuf via
	 * the same bf_list field, so reset the list heads here rather
	 * than leaving them pointing at re-used entries.
	 */
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
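	/*
	 * The deferred list is snapshotted under ATH_RX_LOCK below and
	 * then walked without the lock held, so ath_rx_pkt() can be
	 * called without holding the RX lock.
	 */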
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}

static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef	IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);	/* XXX? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
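	/*
	 * The hardware is handed a single physical address per RX
	 * buffer, so the load below is expected to yield exactly one
	 * contiguous segment; bf_segs[0].ds_addr becomes bf_daddr.
	 */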
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate a RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
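		/*
		 * On allocation failure, stop refilling; the FIFO
		 * simply runs shallower until the next refill attempt
		 * (eg from the RX processing path.)
		 */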
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4KB */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}