1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49 #include "opt_ah.h" 50 #include "opt_wlan.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/sysctl.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/lock.h> 58 #include <sys/mutex.h> 59 #include <sys/kernel.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/errno.h> 63 #include <sys/callout.h> 64 #include <sys/bus.h> 65 #include <sys/endian.h> 66 #include <sys/kthread.h> 67 #include <sys/taskqueue.h> 68 #include <sys/priv.h> 69 #include <sys/module.h> 70 #include <sys/ktr.h> 71 #include <sys/smp.h> /* for mp_ncpus */ 72 73 #include <machine/bus.h> 74 75 #include <net/if.h> 76 #include <net/if_dl.h> 77 #include <net/if_media.h> 78 #include <net/if_types.h> 79 #include <net/if_arp.h> 80 #include <net/ethernet.h> 81 #include <net/if_llc.h> 82 83 #include <net80211/ieee80211_var.h> 84 #include <net80211/ieee80211_regdomain.h> 85 #ifdef IEEE80211_SUPPORT_SUPERG 86 #include <net80211/ieee80211_superg.h> 87 #endif 88 #ifdef IEEE80211_SUPPORT_TDMA 89 #include <net80211/ieee80211_tdma.h> 90 #endif 91 92 #include <net/bpf.h> 93 94 #ifdef INET 95 #include <netinet/in.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #include <dev/ath/if_athvar.h> 100 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101 #include <dev/ath/ath_hal/ah_diagcodes.h> 102 103 #include <dev/ath/if_ath_debug.h> 104 #include <dev/ath/if_ath_misc.h> 105 #include <dev/ath/if_ath_tsf.h> 106 #include <dev/ath/if_ath_tx.h> 107 #include <dev/ath/if_ath_sysctl.h> 108 #include <dev/ath/if_ath_led.h> 109 #include <dev/ath/if_ath_keycache.h> 110 #include <dev/ath/if_ath_rx.h> 111 #include <dev/ath/if_ath_beacon.h> 112 #include <dev/ath/if_athdfs.h> 113 114 #ifdef ATH_TX99_DIAG 115 #include <dev/ath/ath_tx99/ath_tx99.h> 116 #endif 117 118 #include <dev/ath/if_ath_rx_edma.h> 119 120 #ifdef ATH_DEBUG_ALQ 121 #include 
<dev/ath/if_ath_alq.h> 122 #endif 123 124 /* 125 * some general macros 126 */ 127 #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1) 128 #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1) 129 130 MALLOC_DECLARE(M_ATHDEV); 131 132 /* 133 * XXX TODO: 134 * 135 * + Make sure the FIFO is correctly flushed and reinitialised 136 * through a reset; 137 * + Verify multi-descriptor frames work! 138 * + There's a "memory use after free" which needs to be tracked down 139 * and fixed ASAP. I've seen this in the legacy path too, so it 140 * may be a generic RX path issue. 141 */ 142 143 /* 144 * XXX shuffle the function orders so these pre-declarations aren't 145 * required! 146 */ 147 static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, 148 int nbufs); 149 static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype); 150 static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf); 151 static void ath_edma_recv_proc_queue(struct ath_softc *sc, 152 HAL_RX_QUEUE qtype, int dosched); 153 static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, 154 HAL_RX_QUEUE qtype, int dosched); 155 156 static void 157 ath_edma_stoprecv(struct ath_softc *sc, int dodelay) 158 { 159 struct ath_hal *ah = sc->sc_ah; 160 161 ATH_RX_LOCK(sc); 162 ath_hal_stoppcurecv(ah); 163 ath_hal_setrxfilter(ah, 0); 164 ath_hal_stopdmarecv(ah); 165 166 DELAY(3000); 167 168 /* Flush RX pending for each queue */ 169 /* XXX should generic-ify this */ 170 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) { 171 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); 172 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; 173 } 174 175 if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) { 176 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); 177 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; 178 } 179 ATH_RX_UNLOCK(sc); 180 } 181 182 /* 183 * Re-initialise the FIFO given the current buffer contents. 
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 *
 * Used after a warm reset clears the hardware FIFO registers but the
 * driver still tracks live buffers in its software shadow FIFO.
 * Caller must hold the RX lock.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	/* Re-push each tracked buffer, oldest (head) first */
	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right: we should land on the tail */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Process the given queue's FIFO and schedule the deferred-processing
 * RX task.
 */
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ath_edma_recv_proc_queue(sc, qtype, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

/*
 * Process both HP and LP FIFOs and schedule the deferred-processing
 * RX task.
 */
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

/*
 * Flush receive: drain both hardware FIFOs into the deferred lists
 * and process them, without scheduling fresh replacement buffers
 * (dosched=0).
 */
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	/* Mark an RX processor as active so concurrent resets know */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 *
 * Walks the software FIFO from the head, harvesting each completed
 * descriptor onto sc_rx_rxlist[qtype] for later processing by
 * ath_edma_recv_proc_deferred_queue().  Stops at the first
 * descriptor the hardware is still filling (HAL_EINPROGRESS).
 * If 'dosched' is set, fresh buffers are pushed into the FIFO to
 * replace the harvested ones.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	uint16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		/* Hardware hasn't finished this descriptor yet; stop here */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		if (npkts > 0)
			device_printf(sc->sc_dev,
			    "%s: handled npkts %d\n",
			    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return;
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
448 */ 449 static void 450 ath_edma_flush_deferred_queue(struct ath_softc *sc) 451 { 452 struct ath_buf *bf, *next; 453 454 ATH_RX_LOCK_ASSERT(sc); 455 456 /* Free in one set, inside the lock */ 457 TAILQ_FOREACH_SAFE(bf, 458 &sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf_list, next) { 459 /* Free the buffer/mbuf */ 460 ath_edma_rxbuf_free(sc, bf); 461 } 462 TAILQ_FOREACH_SAFE(bf, 463 &sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf_list, next) { 464 /* Free the buffer/mbuf */ 465 ath_edma_rxbuf_free(sc, bf); 466 } 467 } 468 469 static int 470 ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, 471 int dosched) 472 { 473 int ngood = 0; 474 uint64_t tsf; 475 struct ath_buf *bf, *next; 476 struct ath_rx_status *rs; 477 int16_t nf; 478 ath_bufhead rxlist; 479 struct mbuf *m; 480 481 TAILQ_INIT(&rxlist); 482 483 nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan); 484 /* 485 * XXX TODO: the NF/TSF should be stamped on the bufs themselves, 486 * otherwise we may end up adding in the wrong values if this 487 * is delayed too far.. 
488 */ 489 tsf = ath_hal_gettsf64(sc->sc_ah); 490 491 /* Copy the list over */ 492 ATH_RX_LOCK(sc); 493 TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list); 494 ATH_RX_UNLOCK(sc); 495 496 /* Handle the completed descriptors */ 497 TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) { 498 /* 499 * Skip the RX descriptor status - start at the data offset 500 */ 501 m_adj(bf->bf_m, sc->sc_rx_statuslen); 502 503 /* Handle the frame */ 504 505 rs = &bf->bf_status.ds_rxstat; 506 m = bf->bf_m; 507 bf->bf_m = NULL; 508 if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m)) 509 ngood++; 510 } 511 512 if (ngood) { 513 sc->sc_lastrx = tsf; 514 } 515 516 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, 517 "ath edma rx deferred proc: ngood=%d\n", 518 ngood); 519 520 /* Free in one set, inside the lock */ 521 ATH_RX_LOCK(sc); 522 TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) { 523 /* Free the buffer/mbuf */ 524 ath_edma_rxbuf_free(sc, bf); 525 } 526 ATH_RX_UNLOCK(sc); 527 528 return (ngood); 529 } 530 531 static void 532 ath_edma_recv_tasklet(void *arg, int npending) 533 { 534 struct ath_softc *sc = (struct ath_softc *) arg; 535 struct ifnet *ifp = sc->sc_ifp; 536 #ifdef IEEE80211_SUPPORT_SUPERG 537 struct ieee80211com *ic = ifp->if_l2com; 538 #endif 539 540 DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n", 541 __func__, 542 npending); 543 544 ATH_PCU_LOCK(sc); 545 if (sc->sc_inreset_cnt > 0) { 546 device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n", 547 __func__); 548 ATH_PCU_UNLOCK(sc); 549 return; 550 } 551 sc->sc_rxproc_cnt++; 552 ATH_PCU_UNLOCK(sc); 553 554 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1); 555 ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1); 556 557 ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1); 558 ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1); 559 560 /* XXX inside IF_LOCK ? 
*/ 561 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 562 #ifdef IEEE80211_SUPPORT_SUPERG 563 ieee80211_ff_age_all(ic, 100); 564 #endif 565 if (! IFQ_IS_EMPTY(&ifp->if_snd)) 566 ath_tx_kick(sc); 567 } 568 if (ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 569 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 570 571 ATH_PCU_LOCK(sc); 572 sc->sc_rxproc_cnt--; 573 ATH_PCU_UNLOCK(sc); 574 } 575 576 /* 577 * Allocate an RX mbuf for the given ath_buf and initialise 578 * it for EDMA. 579 * 580 * + Allocate a 4KB mbuf; 581 * + Setup the DMA map for the given buffer; 582 * + Return that. 583 */ 584 static int 585 ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 586 { 587 588 struct mbuf *m; 589 int error; 590 int len; 591 592 ATH_RX_LOCK_ASSERT(sc); 593 594 m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA); 595 if (! m) 596 return (ENOBUFS); /* XXX ?*/ 597 598 /* XXX warn/enforce alignment */ 599 600 len = m->m_ext.ext_size; 601 #if 0 602 device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n", 603 __func__, 604 m, 605 len, 606 mtod(m, char *)); 607 #endif 608 609 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 610 611 /* 612 * Populate ath_buf fields. 613 */ 614 bf->bf_desc = mtod(m, struct ath_desc *); 615 bf->bf_lastds = bf->bf_desc; /* XXX only really for TX? */ 616 bf->bf_m = m; 617 618 /* 619 * Zero the descriptor and ensure it makes it out to the 620 * bounce buffer if one is required. 621 * 622 * XXX PREWRITE will copy the whole buffer; we only needed it 623 * to sync the first 32 DWORDS. Oh well. 624 */ 625 memset(bf->bf_desc, '\0', sc->sc_rx_statuslen); 626 627 /* 628 * Create DMA mapping. 629 */ 630 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 631 bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); 632 633 if (error != 0) { 634 device_printf(sc->sc_dev, "%s: failed; error=%d\n", 635 __func__, 636 error); 637 m_freem(m); 638 return (error); 639 } 640 641 /* 642 * Set daddr to the physical mapping page. 
643 */ 644 bf->bf_daddr = bf->bf_segs[0].ds_addr; 645 646 /* 647 * Prepare for the upcoming read. 648 * 649 * We need to both sync some data into the buffer (the zero'ed 650 * descriptor payload) and also prepare for the read that's going 651 * to occur. 652 */ 653 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 654 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 655 656 /* Finish! */ 657 return (0); 658 } 659 660 /* 661 * Allocate a RX buffer. 662 */ 663 static struct ath_buf * 664 ath_edma_rxbuf_alloc(struct ath_softc *sc) 665 { 666 struct ath_buf *bf; 667 int error; 668 669 ATH_RX_LOCK_ASSERT(sc); 670 671 /* Allocate buffer */ 672 bf = TAILQ_FIRST(&sc->sc_rxbuf); 673 /* XXX shouldn't happen upon startup? */ 674 if (bf == NULL) { 675 device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n", 676 __func__); 677 return (NULL); 678 } 679 680 /* Remove it from the free list */ 681 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 682 683 /* Assign RX mbuf to it */ 684 error = ath_edma_rxbuf_init(sc, bf); 685 if (error != 0) { 686 device_printf(sc->sc_dev, 687 "%s: bf=%p, rxbuf alloc failed! error=%d\n", 688 __func__, 689 bf, 690 error); 691 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 692 return (NULL); 693 } 694 695 return (bf); 696 } 697 698 static void 699 ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf) 700 { 701 702 ATH_RX_LOCK_ASSERT(sc); 703 704 /* 705 * Only unload the frame if we haven't consumed 706 * the mbuf via ath_rx_pkt(). 707 */ 708 if (bf->bf_m) { 709 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 710 m_freem(bf->bf_m); 711 bf->bf_m = NULL; 712 } 713 714 /* XXX lock? */ 715 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 716 } 717 718 /* 719 * Allocate up to 'n' entries and push them onto the hardware FIFO. 720 * 721 * Return how many entries were successfully pushed onto the 722 * FIFO. 
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every buffer in the software FIFO for the given queue and
 * reset the FIFO bookkeeping to empty.  Always returns 0.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth and allocates the
 * matching software shadow array.
 *
 * NOTE(review): returns negative errno values (-EINVAL/-ENOMEM),
 * unlike the usual FreeBSD positive-errno convention - callers
 * currently only check for non-zero, but verify before changing.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the software FIFO array for the given queue.
 *
 * NOTE(review): m_fifo is not NULLed after free; callers must not
 * reuse this queue without re-running ath_edma_setup_rxfifo().
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Set up RX DMA state: descriptor DMA tag/buffers plus the per-queue
 * software FIFOs.  Returns 0 or an error from the descdma setup.
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	/* XXX setup_rxfifo errors are ignored here - TODO: propagate? */
	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down RX DMA state: flush deferred/FIFO buffers for both
 * queues, free the FIFO arrays, then release the descriptor DMA
 * memory.  Always returns 0.
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Wire up the EDMA RX method table and configure the hardware RX
 * buffer/status sizes.  Called once at attach time for EDMA parts.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}