/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

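/*
 * These simply map the driver's RX trace points onto spare KTR event
 * classes; see the CTR1()/CTR2() calls in ath_rx_tasklet() and
 * ath_rx_proc() below for how they are used.
 */
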
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o accept PHY error frames when hardware doesn't have MIB support
 *   to count and we need them for ANI (sta mode only until recently)
 *   and we are not scanning (ANI is disabled)
 *   NB: older hal's add rx filter bits out of sight and we need to
 *	 blindly preserve them
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, mesh, or monitor modes
 * o enable promiscuous mode
 *   - when in monitor mode
 *   - if interface marked PROMISC (assumes bridge setting is filtered)
 * o accept beacons:
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when scanning
 *   - when doing s/w beacon miss (e.g. for ap+sta)
 *   - when operating in ap mode in 11g to detect overlapping bss that
 *     require protection
 *   - when operating in mesh mode to detect neighbors
 * o accept control frames:
 *   - when in monitor mode
 * XXX HT protection for 11n
 */
u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (!sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	/* XXX ic->ic_monvaps != 0? */
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;

	/*
	 * Enable hardware PS-POLL RX only for hostap mode;
	 * STA mode sends PS-POLL frames but never
	 * receives them.
	 */
	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
	    0, NULL) == HAL_OK &&
	    ic->ic_opmode == IEEE80211_M_HOSTAP)
		rfilt |= HAL_RX_FILTER_PSPOLL;

	if (sc->sc_nmeshvaps) {
		rfilt |= HAL_RX_FILTER_BEACON;
		if (sc->sc_hasbmatch)
			rfilt |= HAL_RX_FILTER_BSSID;
		else
			rfilt |= HAL_RX_FILTER_PROM;
	}
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;

	/*
	 * Enable RX of compressed BAR frames only when doing
	 * 802.11n. Required for A-MPDU.
	 */
	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_COMPBAR;

	/*
	 * Enable radar PHY errors if requested by the
	 * DFS module.
	 */
	if (sc->sc_dodfs)
		rfilt |= HAL_RX_FILTER_PHYRADAR;

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}

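/*
 * Illustrative sketch, not the canonical call site: the mode setup path
 * (ath_mode_init(), invoked below as "set filters, etc.") is expected to
 * push the computed value to the hardware along the lines of:
 *
 *	u_int32_t rfilt = ath_calcrxfilter(sc);
 *	ath_hal_setrxfilter(sc->sc_ah, rfilt);
 *
 * Both ath_mode_init() and ath_hal_setrxfilter() are referenced elsewhere
 * in this file; the exact sequence lives in if_ath.c.
 */
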
int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be a
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
		    bf->bf_dmamap, m,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
		    ("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

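	/*
	 * Chain this buffer onto the h/w list: sc_rxlink points at the
	 * ds_link field of the previously queued descriptor, so storing
	 * bf_daddr there appends this descriptor; sc_rxlink is then
	 * advanced to this descriptor's ds_link for the next caller.
	 */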
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		/* XXX unlocked check against vap->iv_bss? */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
			    ath_hal_gettsf64(sc->sc_ah));
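			/*
			 * NB: rs_tstamp is only a truncated snapshot of the
			 * TSF latched at receive time (15 or 32 bits wide
			 * depending on the chip); ath_extend_tsf() widens it
			 * back to 64 bits against the current TSF (see
			 * if_ath_tsf.h).
			 */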
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}

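/*
 * Fill in the radiotap receive header (the sc_rx_th template kept in the
 * softc) from the RX status for this frame; callers then hand the mbuf to
 * the radiotap taps / bpf listeners.
 */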
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}

static void
ath_handle_micerror(struct ieee80211com *ic,
	struct ieee80211_frame *wh, int keyix)
{
	struct ieee80211_node *ni;

	/* XXX recheck MIC to deal w/ chips that lie */
	/* XXX discard MIC errors on !data frames */
	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
	if (ni != NULL) {
		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
		ieee80211_free_node(ni);
	}
}

/*
 * Only run the RX proc if it's not already running.
 * Since this may get run as part of the reset/flush path,
 * the task can't clash with an existing, running tasklet.
 */
void
ath_rx_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = arg;

	CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev,
		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	ATH_PCU_UNLOCK(sc);
	ath_rx_proc(sc, 1);
}

void
ath_rx_proc(struct ath_softc *sc, int resched)
{
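/*
 * PA2DESC() maps a descriptor's physical (DMA) address back to its kernel
 * virtual address by offsetting into the contiguous descriptor block
 * (sc_rxdma); it is used below so the HAL can peek at the descriptor
 * following the one being completed.
 */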
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf, rstamp;
	int npkts = 0;

	/* XXX we must not hold the ATH_LOCK here */
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
	ngood = 0;
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
		    bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;

		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
		npkts++;

		/*
		 * Calculate the correct 64 bit TSF given
		 * the TSF64 register value and rs_tstamp.
		 */
		rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);

		/* These aren't specifically errors */
#ifdef AH_SUPPORT_AR5416
		if (rs->rs_flags & HAL_RX_GI)
			sc->sc_stats.ast_rx_halfgi++;
		if (rs->rs_flags & HAL_RX_2040)
			sc->sc_stats.ast_rx_2040++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
			sc->sc_stats.ast_rx_pre_crc_err++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
			sc->sc_stats.ast_rx_post_crc_err++;
		if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
			sc->sc_stats.ast_rx_decrypt_busy_err++;
		if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
			sc->sc_stats.ast_rx_hi_rx_chain++;
#endif /* AH_SUPPORT_AR5416 */

		if (rs->rs_status != 0) {
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				/* Process DFS radar events */
				if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
				    (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
					/* Since we're touching the frame data, sync it */
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					/* Now pass it to the radar processing code */
					ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs);
				}

				/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
				if (rs->rs_phyerr < 64)
					sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
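				/*
				 * NB: on split-mic parts the h/w reports the
				 * key cache slot of the separate RX MIC key;
				 * the -32 below presumably converts that back
				 * to the base key index net80211 expects (the
				 * pairing is set up by the key cache code).
				 */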
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
	rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (ieee80211_radiotap_active(ic) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				bf->bf_m = NULL;
				ath_rx_tap(ifp, m, rs, rstamp, nf);
				ieee80211_radiotap_rx_all(ic, m);
				m_freem(m);
			}
			/* XXX pass MIC errors up for s/w recalculation */
			goto rx_next;
		}
	rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		/*
		 * Validate rs->rs_antenna.
		 *
		 * Some users w/ AR9285 NICs have reported crashes
		 * here because rs_antenna field is bogusly large.
		 * Let's enforce the maximum antenna limit of 8
		 * (and it shouldn't be hard coded, but that's a
		 * separate problem) and if there's an issue, print
		 * out an error and adjust rs_antenna to something
		 * sensible.
		 *
		 * This code should be removed once the actual
		 * root cause of the issue has been identified.
		 * For example, it may be that the rs_antenna
		 * field is only valid for the last frame of
		 * an aggregate and it just happens that it is
		 * "mostly" right. (This is a general statement -
		 * the majority of the statistics are only valid
		 * for the last frame in an aggregate.)
		 */
		if (rs->rs_antenna > 7) {
			device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
			    __func__, rs->rs_antenna);
#ifdef ATH_DEBUG
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif /* ATH_DEBUG */
			rs->rs_antenna = 0;	/* XXX better than nothing */
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		/*
		 * Populate the rx status block.  When there are bpf
		 * listeners we do the additional work to provide
		 * complete status.  Otherwise we fill in only the
		 * material required by ieee80211_input.  Note that
		 * noise setting is filled in above.
		 */
		if (ieee80211_radiotap_active(ic))
			ath_rx_tap(ifp, m, rs, rstamp, nf);

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			if (!ieee80211_radiotap_active(ic)) {
				DPRINTF(sc, ATH_DEBUG_RECV,
				    "%s: short packet %d\n", __func__, len);
				sc->sc_stats.ast_rx_tooshort++;
			} else {
				/* NB: in particular this captures ack's */
				ieee80211_radiotap_rx_all(ic, m);
			}
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
		    mtod(m, const struct ieee80211_frame_min *),
		    rs->rs_keyix == HAL_RXKEYIX_INVALID ?
			IEEE80211_KEYIX_NONE : rs->rs_keyix);
		sc->sc_lastrs = rs;

#ifdef AH_SUPPORT_AR5416
		if (rs->rs_isaggr)
			sc->sc_stats.ast_rx_agg++;
#endif /* AH_SUPPORT_AR5416 */

		if (ni != NULL) {
			/*
			 * Only punt packets for ampdu reorder processing for
			 * 11n nodes; net80211 enforces that M_AMPDU is only
			 * set for 11n nodes.
			 */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;

			/*
			 * Sending station is known, dispatch directly.
			 */
			type = ieee80211_input(ni, m, rs->rs_rssi, nf);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
		}
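		/*
		 * NB: ngood (bumped just above for keyed frames from our AP
		 * in station mode) is what refreshes sc_lastrx at the bottom
		 * of this function; the beacon miss handling elsewhere in
		 * the driver consults that timestamp to decide whether we
		 * are still hearing our AP.
		 */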
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/* Newer school diversity - kite specific for now */
		/* XXX perhaps migrate the normal diversity code to this? */
		if ((ah)->ah_rxAntCombDiversity)
			(*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);

		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
	rx_next:
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
	/* Queue DFS tasklet if needed */
	if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

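	/*
	 * NB: sc_kickpcu is expected to be set by the interrupt path when
	 * it sees an RXEOL (the RX descriptor list ran dry), in which case
	 * the receive engine has stopped and must be re-primed and
	 * restarted here.
	 */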
	/*
	 * Now that all the RX frames were handled that
	 * need to be handled, kick the PCU if there's
	 * been an RXEOL condition.
	 */
	ATH_PCU_LOCK(sc);
	if (resched && sc->sc_kickpcu) {
		CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
		device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
		    __func__, npkts);

		/* XXX rxslink? */
		/*
		 * XXX can we hold the PCU lock here?
		 * Are there any net80211 buffer calls involved?
		 */
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		ath_hal_putrxbuf(ah, bf->bf_daddr);
		ath_hal_rxena(ah);		/* enable recv descriptors */
		ath_mode_init(sc);		/* set filters, etc. */
		ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */

		ath_hal_intrset(ah, sc->sc_imask);
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	/* XXX check this inside of IF_LOCK? */
	if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_start(ifp);
	}
#undef PA2DESC

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
void
ath_stoprecv(struct ath_softc *sc, int dodelay)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	/*
	 * TODO: see if this particular DELAY() is required; it may be
	 * masking some missing FIFO flush or DMA sync.
	 */
#if 0
	if (dodelay)
#endif
		DELAY(3000);	/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		device_printf(sc->sc_dev,
		    "%s: rx queue %p, link %p\n",
		    __func__,
		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah),
		    sc->sc_rxlink);
		ix = 0;
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
			    bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	if (sc->sc_rxpending != NULL) {
		m_freem(sc->sc_rxpending);
		sc->sc_rxpending = NULL;
	}
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 */
int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	sc->sc_rxpending = NULL;
	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_RECV,
			    "%s: ath_rxbuf_init failed %d\n",
			    __func__, error);
			return error;
		}
	}

	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_putrxbuf(ah, bf->bf_daddr);
	ath_hal_rxena(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
	return 0;
}