1 /*- 2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40 #include "opt_inet.h" 41 #include "opt_ath.h" 42 /* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 * 46 * It's also required for any AH_DEBUG checks in here, eg the 47 * module dependencies. 48 */ 49 #include "opt_ah.h" 50 #include "opt_wlan.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/sysctl.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/lock.h> 58 #include <sys/mutex.h> 59 #include <sys/kernel.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/errno.h> 63 #include <sys/callout.h> 64 #include <sys/bus.h> 65 #include <sys/endian.h> 66 #include <sys/kthread.h> 67 #include <sys/taskqueue.h> 68 #include <sys/priv.h> 69 #include <sys/module.h> 70 #include <sys/ktr.h> 71 #include <sys/smp.h> /* for mp_ncpus */ 72 73 #include <machine/bus.h> 74 75 #include <net/if.h> 76 #include <net/if_dl.h> 77 #include <net/if_media.h> 78 #include <net/if_types.h> 79 #include <net/if_arp.h> 80 #include <net/ethernet.h> 81 #include <net/if_llc.h> 82 83 #include <net80211/ieee80211_var.h> 84 #include <net80211/ieee80211_regdomain.h> 85 #ifdef IEEE80211_SUPPORT_SUPERG 86 #include <net80211/ieee80211_superg.h> 87 #endif 88 #ifdef IEEE80211_SUPPORT_TDMA 89 #include <net80211/ieee80211_tdma.h> 90 #endif 91 92 #include <net/bpf.h> 93 94 #ifdef INET 95 #include <netinet/in.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #include <dev/ath/if_athvar.h> 100 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 101 #include <dev/ath/ath_hal/ah_diagcodes.h> 102 103 #include <dev/ath/if_ath_debug.h> 104 #include <dev/ath/if_ath_misc.h> 105 #include <dev/ath/if_ath_tsf.h> 106 #include <dev/ath/if_ath_tx.h> 107 #include <dev/ath/if_ath_sysctl.h> 108 #include <dev/ath/if_ath_led.h> 109 #include <dev/ath/if_ath_keycache.h> 110 #include <dev/ath/if_ath_rx.h> 111 #include <dev/ath/if_ath_beacon.h> 112 #include <dev/ath/if_athdfs.h> 113 114 #ifdef ATH_TX99_DIAG 115 #include <dev/ath/ath_tx99/ath_tx99.h> 116 #endif 117 118 #include <dev/ath/if_ath_rx_edma.h> 119 120 /* 121 * some general macros 122 */ 123 #define 
INCR(_l, _sz)		(_l) ++; (_l) &= ((_sz) - 1)
#define	DECR(_l, _sz)		(_l) --; (_l) &= ((_sz) - 1)
/*
 * NOTE(review): INCR/DECR expand to *two* statements; they are only
 * safe used as stand-alone statements, never as the un-braced body
 * of an if/for/while.  All current callers use them stand-alone.
 */

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Add an RX lock, just to ensure we don't have things clash;
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Handle the "kickpcu" state where the FIFO overflows.
 * + Implement a "flush" routine, which doesn't push any
 *   new frames into the FIFO.
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	int ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

/*
 * Stop the RX PCU and DMA engines and free any pending reassembly
 * mbufs.
 *
 * The HAL calls are ordered: stop the PCU receive engine first,
 * clear the RX filter (so nothing new matches), then stop the RX
 * DMA engine itself.  The 3ms delay gives any in-flight DMA a
 * chance to drain before the pending mbufs are freed.
 *
 * NOTE(review): the 'dodelay' argument is currently ignored - the
 * DELAY(3000) below is unconditional.
 */
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);
	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);

	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 *
 * Each tracked buffer's DMA address is re-written to the hardware
 * FIFO in order; after the walk the index must have arrived back at
 * the software tail pointer, otherwise the head/tail/depth
 * bookkeeping is inconsistent (and we complain loudly).
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Process any completed frames on both RX queues without pushing
 * replacement buffers back into the FIFO (dosched=0).
 *
 * sc_rxproc_cnt is bumped under the PCU lock so a concurrent reset
 * can tell RX processing is in flight.
 *
 * NOTE(review): the device_printf() here fires on every flush; it
 * looks like leftover debugging rather than an operational message.
 */
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	device_printf(sc->sc_dev, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue.
 *
 * TODO:
 *
 * + Add a "dosched" flag, so we don't reschedule any FIFO frames
 *   to the hardware or re-kick the PCU after 'kickpcu' is set.
 *
 * + Perhaps split "check FIFO contents" and "handle frames", so
 *   we can run the "check FIFO contents" in ath_intr(), but
 *   "handle frames" in the RX tasklet.
 */
static int
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;
	int ngood = 0, npkts = 0;
	ath_bufhead rxlist;
	struct ath_buf *next;

	TAILQ_INIT(&rxlist);

	/* Snapshot TSF and channel noise floor for this batch of frames */
	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	/*
	 * Phase 1 - under the RX lock, pop completed descriptors off the
	 * software FIFO onto a private completion list (rxlist).  Stop
	 * at the first descriptor the hardware hasn't finished.
	 */
	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 *
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef	ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif
		/* Hardware hasn't completed this descriptor yet; stop here */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 *
		 * In the future we'll call ath_rx_pkt(), but it first
		 * has to be taught about EDMA RX queues (so it can
		 * access sc_rxpending correctly.)
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&rxlist, bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/*
	 * Phase 2 - outside the RX lock, hand each completed frame to
	 * the net80211 RX path.
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */
		/*
		 * Note: this may or may not free bf->bf_m and sync/unmap
		 * the frame.
		 */
		rs = &bf->bf_status.ds_rxstat;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf))
			ngood++;
	}

	/* Free in one set, inside the lock */
	/*
	 * Phase 3 - re-take the RX lock and return all the ath_bufs to
	 * the free list in one batch.
	 */
	ATH_RX_LOCK(sc);
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 2,
	    "ath edma rx proc: npkts=%d, ngood=%d",
	    npkts, ngood);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		device_printf(sc->sc_dev,
		    "%s: handled npkts %d ngood %d\n",
		    __func__, npkts, ngood);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	return (ngood);
}

/*
 * RX processing tasklet.
 *
 * Runs the deferred RX work: processes both EDMA RX queues with
 * rescheduling enabled (dosched=1, so fresh buffers are pushed back
 * into the FIFO), ages pending fast-frames, kicks TX if the send
 * queue has work, and schedules the DFS tasklet when needed.
 *
 * Bails out early if a chip reset is in progress (sc_inreset_cnt),
 * and brackets the work with sc_rxproc_cnt so a concurrent reset can
 * tell RX processing is in flight.
 */
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef	IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef	IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Keep a pointer to the start of the mbuf - that's where the
 *   descriptor lies;
 * + Take a pointer to the start of the RX buffer, set the
 *   mbuf "start" to be there;
 * + Return that.
 *
 * Returns 0 on success, ENOBUFS if no mbuf could be allocated, or
 * the bus_dma error code if the DMA load failed (the mbuf is freed
 * on that path).
 *
 * NOTE(review): m_getm() may return a *chain* of mbufs for large
 * requests, but this code assumes a single external-storage mbuf
 * (it reads m->m_ext.ext_size directly) - confirm sc_edma_bufsize
 * always fits a single cluster.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{

	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_DONTWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);		/* XXX ?*/

	/* XXX warn/enforce alignment */

	/* NOTE(review): 'len' is only read by the disabled printf below */
	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Populate ath_buf fields.
	 */

	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_daddr = bf->bf_segs[0].ds_addr;
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/* Zero the descriptor */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

#if 0
	/*
	 * Adjust mbuf header and length/size to compensate for the
	 * descriptor size.
	 */
	m_adj(m, sc->sc_rx_statuslen);
#endif

	/* Finish! */

	return (0);
}

/*
 * Take an ath_buf off the sc_rxbuf free list and attach a fresh RX
 * mbuf/DMA mapping to it.
 *
 * Returns NULL if the free list is empty or if mbuf setup failed
 * (in which case the ath_buf is returned to the free list first).
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL)
		return (NULL);

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

/*
 * Unmap and free the RX mbuf attached to this ath_buf (if any), then
 * return the ath_buf to the sc_rxbuf free list.
 */
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/* We're doing this multiple times? */
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

	if (bf->bf_m) {
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: alloc failed?\n",
			    __func__,
			    qtype);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/*
		 * Flush the descriptor contents before it's handed to the
		 * hardware.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREREAD);

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: putrxbuf=%p\n",
		    __func__,
		    qtype,
		    bf->bf_desc);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}

/*
 * Free every ath_buf currently tracked in the given queue's software
 * FIFO, free any pending reassembly mbuf, and reset the FIFO
 * head/tail/depth bookkeeping to empty.
 *
 * Always returns 0.
 */
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef	ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 *
 * Queries the HAL for the hardware FIFO depth of this queue and
 * allocates a matching, zeroed ath_buf pointer array for software
 * tracking, then resets the head/tail/depth state to empty.
 *
 * NOTE(review): returns -EINVAL / -ENOMEM (negative errno) on
 * failure, which is unusual for FreeBSD (positive errno is the
 * convention); the only caller, ath_edma_dma_rxsetup(), currently
 * discards the return value anyway.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Free the software FIFO tracking array for the given queue.
 *
 * Note: this does not free the tracked ath_bufs themselves - the
 * caller flushes the FIFO first (see ath_edma_dma_rxteardown()).
 * Always returns 0.
 */
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}

/*
 * Set up the RX descriptor DMA memory and the software FIFO state
 * for both the high- and low-priority RX queues.
 *
 * Returns 0 on success, or the error from the descriptor DMA setup.
 *
 * NOTE(review): failures from ath_edma_setup_rxfifo() are discarded
 * here (the casts to void); a FIFO allocation failure would leave
 * re->m_fifo NULL.
 */
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return error;

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

/*
 * Tear down RX state: flush and free both queue FIFOs under the RX
 * lock, then release the RX descriptor DMA memory if it was
 * allocated.  Always returns 0.
 */
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

/*
 * Attach-time setup for the EDMA RX path.
 *
 * Fixes the RX buffer size at 4KB, fetches the RX status descriptor
 * length from the HAL, programs the hardware RX buffer size (buffer
 * minus status area), and installs the EDMA RX method table into
 * sc->sc_rx.
 */
void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;
}