/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 *
 * Note: the mask-based wrap assumes (_sz) is a power of two.
 */
#define	INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define	DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static	int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static	int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static	void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static	void ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static	int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
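/*
 * Stop the RX path.
 *
 * The PCU receive engine is stopped first, then the RX filter is
 * cleared and RX DMA is halted; the DELAY() gives any in-flight DMA
 * a chance to settle before the pending reassembly mbufs are freed.
 * Note that the dodelay argument is currently ignored - the delay
 * is unconditional.
 */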
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);
	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);

	DELAY(3000);

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}

	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}
	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__,
		    qtype,
		    i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__,
		    i,
		    re->m_fifo_tail);
	}
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	/* Enable RX FIFO */
	ath_hal_rxena(ah);

	/*
	 * Entries should only be written out if the
	 * FIFO is empty.
	 *
	 * XXX This isn't correct. I should be looking
	 * at the value of AR_RXDP_SIZE (0x0070) to determine
	 * how many entries are in here.
	 *
	 * A warm reset will clear the registers but not the FIFO.
	 *
	 * And I believe this is actually the address of the last
	 * handled buffer rather than the current FIFO pointer.
	 * So if no frames have been (yet) seen, we'll reinit the
	 * FIFO.
	 *
	 * I'll chase that up at some point.
	 */
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
	}
	if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
	}

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_mode_init(sc);
	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}
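/*
 * Kick the RX taskqueue.
 *
 * These entry points drain the given hardware FIFO queue(s) into
 * the deferred completion lists and then schedule the RX task, so
 * frame completion runs in taskqueue context rather than in the
 * caller's (eg interrupt) context.
 */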
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

	ath_edma_recv_proc_queue(sc, qtype, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
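/*
 * Flush the receive path.
 *
 * This drains any completed frames from the hardware FIFOs into
 * the deferred lists and processes those lists, without scheduling
 * any further RX.  sc_rxproc_cnt is bumped around the operation so
 * other parts of the driver (eg the reset path) can tell that RX
 * processing is in flight.
 */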
static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;
	int npkts = 0;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__,
			    qtype);
			break;
		}
		m = bf->bf_m;
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS)
			break;

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	if (dosched)
		ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (dosched && sc->sc_kickpcu) {
		ATH_KTR(sc, ATH_KTR_ERROR, 0,
		    "ath_edma_recv_proc_queue(): kickpcu");
		if (npkts > 0)
			device_printf(sc->sc_dev,
			    "%s: handled npkts %d\n",
			    __func__, npkts);

		/*
		 * XXX TODO: what should occur here? Just re-poke and
		 * re-enable the RX FIFO?
		 */
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
		bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
}
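/*
 * Process the deferred completion list for the given queue.
 *
 * Completed buffers are pulled off the deferred list with the RX
 * lock held, handed up via ath_rx_pkt() without it, and then freed
 * back to the free list in one pass.  Returns the number of frames
 * successfully received.
 */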
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf, *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	/*
	 * XXX is this SAFE call needed? The ath_buf entries
	 * aren't modified by ath_rx_pkt, right?
	 */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while (! TAILQ_EMPTY(&rxlist)) {
		bf = TAILQ_FIRST(&rxlist);
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		/* Free the buffer/mbuf */
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}
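/*
 * RX taskqueue routine.
 *
 * This drains both hardware FIFO queues into the deferred lists,
 * processes those lists, and then kicks TX and (if required) the
 * DFS tasklet.  It bails out early if a chip reset is in progress.
 */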
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__,
	    npending);

	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* XXX inside IF_LOCK ? */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (! IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
	if (! m)
		return (ENOBUFS);	/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__,
	    m,
	    len,
	    mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__,
		    error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical address of the mapping.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to occur.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__,
		    bf,
		    error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}
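/*
 * Free an RX buffer.
 *
 * The mbuf, if still attached, is unmapped and freed; the ath_buf
 * itself is then returned to the rxbuf free list.
 */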
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	/* XXX lock? */
	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO slot is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__,
			    qtype,
			    re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__,
			    qtype,
			    i,
			    nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__,
		    qtype,
		    bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__,
	    qtype,
	    nbufs,
	    i);
	return (i);
}
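/*
 * Flush the given hardware FIFO's software state.
 *
 * Each tracked entry (and any pending partially-received mbuf) is
 * freed, and the head/tail/depth bookkeeping is reset to empty.
 */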
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__,
		    qtype);
		return (-EINVAL);
	}
	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__,
	    qtype,
	    re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV,
	    M_NOWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__,
	    qtype);

	free(re->m_fifo, M_ATHDEV);

	return (0);
}
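/*
 * Allocate the RX descriptor/buffer DMA state and the per-queue
 * FIFO tracking arrays.  The teardown routine below undoes this.
 */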
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4k */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}