Lines Matching +full:mode +full:- +full:xxx

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
44 * by the driver - e.g., calls to ath_hal_gettsf32().
101 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
128 * operating mode and state:
132 * to count and we need them for ANI (sta mode only until recently)
138 * o enable promiscuous mode
139 * - when in monitor mode
140 * - if interface marked PROMISC (assumes bridge setting is filtered)
142 * - when operating in station mode for collecting rssi data when
144 * - when operating in adhoc mode so the 802.11 layer creates
146 * - when scanning
147 * - when doing s/w beacon miss (e.g. for ap+sta)
148 * - when operating in ap mode in 11g to detect overlapping bss that
150 * - when operating in mesh mode to detect neighbors
152 * - when in monitor mode
153 * XXX HT protection for 11n
158 struct ieee80211com *ic = &sc->sc_ic; in ath_calcrxfilter()
162 if (!sc->sc_needmib && !sc->sc_scanning) in ath_calcrxfilter()
164 if (ic->ic_opmode != IEEE80211_M_STA) in ath_calcrxfilter()
166 /* XXX ic->ic_monvaps != 0? */ in ath_calcrxfilter()
167 if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0) in ath_calcrxfilter()
180 if (ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) { in ath_calcrxfilter()
182 } else if (ic->ic_opmode == IEEE80211_M_STA) { in ath_calcrxfilter()
183 if (sc->sc_do_mybeacon && ! sc->sc_scanning) { in ath_calcrxfilter()
185 } else { /* scanning, non-mybeacon chips */ in ath_calcrxfilter()
195 if (ic->ic_opmode == IEEE80211_M_HOSTAP && in ath_calcrxfilter()
196 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) in ath_calcrxfilter()
200 * Enable hardware PS-POLL RX only for hostap mode; in ath_calcrxfilter()
201 * STA mode sends PS-POLL frames but never in ath_calcrxfilter()
204 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, in ath_calcrxfilter()
206 ic->ic_opmode == IEEE80211_M_HOSTAP) in ath_calcrxfilter()
209 if (sc->sc_nmeshvaps) { in ath_calcrxfilter()
211 if (sc->sc_hasbmatch) in ath_calcrxfilter()
216 if (ic->ic_opmode == IEEE80211_M_MONITOR) in ath_calcrxfilter()
221 * 802.11n. Required for A-MPDU. in ath_calcrxfilter()
223 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) in ath_calcrxfilter()
230 if (sc->sc_dodfs) in ath_calcrxfilter()
237 if (sc->sc_dospectral) in ath_calcrxfilter()
241 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]); in ath_calcrxfilter()
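
The comment block above enumerates the cases in which ath_calcrxfilter() widens the RX filter: promiscuous reception in monitor mode or when an interface is marked PROMISC, beacons while scanning, in adhoc, or when doing software beacon miss, probe requests outside pure station operation, and so on. As a minimal sketch of that flag-accumulation pattern, assuming a reduced set of conditions and using HAL_RX_FILTER_* names in the HAL's style (this simplified helper is illustrative, not the driver's actual routine):

static uint32_t
ath_calcrxfilter_sketch(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t rfilt;

	/* Frames every operating mode wants. */
	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;

	/* Promiscuous: monitor mode, or an interface marked PROMISC. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0)
		rfilt |= HAL_RX_FILTER_PROM;

	/* Beacons: while scanning, in adhoc, or for s/w beacon miss. */
	if (sc->sc_scanning || ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss)
		rfilt |= HAL_RX_FILTER_BEACON;

	/* Probe requests: anything other than pure station mode. */
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;

	return (rfilt);
}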
248 struct ath_hal *ah = sc->sc_ah; in ath_legacy_rxbuf_init()
253 /* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */ in ath_legacy_rxbuf_init()
255 m = bf->bf_m; in ath_legacy_rxbuf_init()
260 * this buffer be cache-line-aligned and sized to be in ath_legacy_rxbuf_init()
268 sc->sc_stats.ast_rx_nombuf++; in ath_legacy_rxbuf_init()
271 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; in ath_legacy_rxbuf_init()
273 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, in ath_legacy_rxbuf_init()
274 bf->bf_dmamap, m, in ath_legacy_rxbuf_init()
275 bf->bf_segs, &bf->bf_nseg, in ath_legacy_rxbuf_init()
281 sc->sc_stats.ast_rx_busdma++; in ath_legacy_rxbuf_init()
285 KASSERT(bf->bf_nseg == 1, in ath_legacy_rxbuf_init()
286 ("multi-segment packet; nseg %u", bf->bf_nseg)); in ath_legacy_rxbuf_init()
287 bf->bf_m = m; in ath_legacy_rxbuf_init()
289 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); in ath_legacy_rxbuf_init()
293 * the descriptor list with a self-linked entry so we'll in ath_legacy_rxbuf_init()
297 * To ensure the last descriptor is self-linked we create in ath_legacy_rxbuf_init()
298 * each descriptor as self-linked and add it to the end. As in ath_legacy_rxbuf_init()
299 * each additional descriptor is added the previous self-linked in ath_legacy_rxbuf_init()
302 * never remove/process the last, self-linked, entry on the in ath_legacy_rxbuf_init()
310 * to a sender if last desc is self-linked. in ath_legacy_rxbuf_init()
312 ds = bf->bf_desc; in ath_legacy_rxbuf_init()
313 if (sc->sc_rxslink) in ath_legacy_rxbuf_init()
314 ds->ds_link = bf->bf_daddr; /* link to self */ in ath_legacy_rxbuf_init()
316 ds->ds_link = 0; /* terminate the list */ in ath_legacy_rxbuf_init()
317 ds->ds_data = bf->bf_segs[0].ds_addr; in ath_legacy_rxbuf_init()
319 , m->m_len /* buffer size */ in ath_legacy_rxbuf_init()
323 if (sc->sc_rxlink != NULL) in ath_legacy_rxbuf_init()
324 *sc->sc_rxlink = bf->bf_daddr; in ath_legacy_rxbuf_init()
325 sc->sc_rxlink = &ds->ds_link; in ath_legacy_rxbuf_init()
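
The comments above describe the self-linked descriptor scheme used by the legacy RX path: each descriptor is first written as pointing to itself, and appending the next buffer overwrites the previous tail's ds_link, so only the final descriptor stays self-linked and the DMA engine stalls there on overrun instead of running off the end of the list. Condensed into one loop, using only fields visible in this listing (the loop framing is illustrative; the driver does this one buffer at a time through its rxbuf_init path):

sc->sc_rxlink = NULL;
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
	struct ath_desc *ds = bf->bf_desc;

	ds->ds_link = bf->bf_daddr;		/* provisionally self-linked */
	ds->ds_data = bf->bf_segs[0].ds_addr;	/* DMA address of the rx mbuf */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;	/* un-self-link the previous tail */
	sc->sc_rxlink = &ds->ds_link;		/* this descriptor becomes the tail */
}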
337 struct ieee80211vap *vap = ni->ni_vap; in ath_recv_mgmt()
338 struct ath_softc *sc = vap->iv_ic->ic_softc; in ath_recv_mgmt()
347 tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32; in ath_recv_mgmt()
348 tsf_beacon_old |= le32dec(ni->ni_tstamp.data); in ath_recv_mgmt()
352 if (ni->ni_intval > 0) { in ath_recv_mgmt()
353 tsf_intval = TU_TO_TSF(ni->ni_intval); in ath_recv_mgmt()
361 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf); in ath_recv_mgmt()
365 * Always update the per-node beacon RSSI if we're hearing in ath_recv_mgmt()
368 ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgbrssi, rssi); in ath_recv_mgmt()
374 * In scan and IBSS mode we receive all beacons, in ath_recv_mgmt()
380 if ((vap->iv_opmode != IEEE80211_M_HOSTAP) && in ath_recv_mgmt()
381 IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) { in ath_recv_mgmt()
383 /* XXX unlocked check against vap->iv_bss? */ in ath_recv_mgmt()
384 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); in ath_recv_mgmt()
386 tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32; in ath_recv_mgmt()
387 tsf_beacon |= le32dec(ni->ni_tstamp.data); in ath_recv_mgmt()
389 nexttbtt = ath_hal_getnexttbtt(sc->sc_ah); in ath_recv_mgmt()
396 tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old; in ath_recv_mgmt()
413 * The remainder using '%' is between 0 .. intval-1. in ath_recv_mgmt()
415 * will be some large number just under intval-1. in ath_recv_mgmt()
423 -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval)); in ath_recv_mgmt()
425 tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval; in ath_recv_mgmt()
444 ni->ni_bssid, ":", in ath_recv_mgmt()
445 vap->iv_bss->ni_bssid, ":", in ath_recv_mgmt()
450 (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval); in ath_recv_mgmt()
455 * may end up overwriting AP mode beacon config. in ath_recv_mgmt()
460 if (vap->iv_opmode == IEEE80211_M_STA && in ath_recv_mgmt()
461 sc->sc_syncbeacon && in ath_recv_mgmt()
462 (!sc->sc_swbmiss) && in ath_recv_mgmt()
463 ni == vap->iv_bss && in ath_recv_mgmt()
464 ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) && in ath_recv_mgmt()
465 (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) { in ath_recv_mgmt()
474 sc->sc_syncbeacon = 0; in ath_recv_mgmt()
480 if (vap->iv_opmode == IEEE80211_M_IBSS && in ath_recv_mgmt()
481 vap->iv_state == IEEE80211_S_RUN && in ath_recv_mgmt()
483 uint32_t rstamp = sc->sc_lastrs->rs_tstamp; in ath_recv_mgmt()
485 ath_hal_gettsf64(sc->sc_ah)); in ath_recv_mgmt()
494 * RUN -> RUN when this happens. in ath_recv_mgmt()
496 if (le64toh(ni->ni_tstamp.tsf) >= tsf) { in ath_recv_mgmt()
500 (uintmax_t)ni->ni_tstamp.tsf); in ath_recv_mgmt()
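
The remainder arithmetic referenced above folds the beacon-to-beacon TSF delta modulo the beacon interval into a small signed drift: a remainder near zero means the local timers track the AP, while a remainder just under the interval means the local side is slightly behind and is converted into a small negative value. A worked sketch (the helper name and the fold threshold of half an interval are assumptions for illustration; the driver computes this inline in ath_recv_mgmt()):

/*
 * With a 100 TU beacon interval, tsf_intval = 100 << 10 = 102400 usec.
 *   delta = 3*102400 + 80  -> remainder 80      => drift of +80 usec
 *   delta = 3*102400 - 80  -> remainder 102320  => folded to -80 usec
 */
static int64_t
tsf_beacon_drift_sketch(uint64_t tsf_beacon, uint64_t tsf_beacon_old,
    uint64_t tsf_intval)
{
	uint64_t rem = (tsf_beacon - tsf_beacon_old) % tsf_intval;

	if (rem > tsf_intval / 2)
		return (-(int64_t)(tsf_intval - rem));
	return ((int64_t)rem);
}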
515 sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER); in ath_rx_tap_vendor()
518 sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f; in ath_rx_tap_vendor()
519 sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03; in ath_rx_tap_vendor()
520 sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00; in ath_rx_tap_vendor()
522 /* XXX what should this be? */ in ath_rx_tap_vendor()
523 sc->sc_rx_th.wr_vh.vh_sub_ns = 0; in ath_rx_tap_vendor()
524 sc->sc_rx_th.wr_vh.vh_skip_len = in ath_rx_tap_vendor()
528 sc->sc_rx_th.wr_v.vh_version = 1; in ath_rx_tap_vendor()
530 sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask; in ath_rx_tap_vendor()
533 sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0]; in ath_rx_tap_vendor()
534 sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1]; in ath_rx_tap_vendor()
535 sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2]; in ath_rx_tap_vendor()
536 sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0]; in ath_rx_tap_vendor()
537 sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1]; in ath_rx_tap_vendor()
538 sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2]; in ath_rx_tap_vendor()
541 sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0; in ath_rx_tap_vendor()
542 sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1; in ath_rx_tap_vendor()
543 sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2; in ath_rx_tap_vendor()
545 sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3; in ath_rx_tap_vendor()
546 sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4; in ath_rx_tap_vendor()
549 sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX; in ath_rx_tap_vendor()
552 sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate; in ath_rx_tap_vendor()
555 sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags; in ath_rx_tap_vendor()
557 if (rs->rs_isaggr) in ath_rx_tap_vendor()
558 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR; in ath_rx_tap_vendor()
559 if (rs->rs_moreaggr) in ath_rx_tap_vendor()
560 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR; in ath_rx_tap_vendor()
563 if (rs->rs_status & HAL_RXERR_PHY) { in ath_rx_tap_vendor()
564 sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr; in ath_rx_tap_vendor()
565 sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR; in ath_rx_tap_vendor()
567 sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff; in ath_rx_tap_vendor()
569 sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status; in ath_rx_tap_vendor()
570 sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi; in ath_rx_tap_vendor()
585 rt = sc->sc_currates; in ath_rx_tap()
586 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); in ath_rx_tap()
587 rix = rt->rateCodeToIndex[rs->rs_rate]; in ath_rx_tap()
588 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; in ath_rx_tap()
589 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; in ath_rx_tap()
592 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; in ath_rx_tap()
593 if (rs->rs_status & HAL_RXERR_PHY) { in ath_rx_tap()
595 * PHY error - make sure the channel flags in ath_rx_tap()
599 if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan)) in ath_rx_tap()
600 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; in ath_rx_tap()
601 else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan)) in ath_rx_tap()
602 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; in ath_rx_tap()
603 else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan)) in ath_rx_tap()
604 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; in ath_rx_tap()
605 } else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ in ath_rx_tap()
606 struct ieee80211com *ic = &sc->sc_ic; in ath_rx_tap()
608 if ((rs->rs_flags & HAL_RX_2040) == 0) in ath_rx_tap()
609 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; in ath_rx_tap()
610 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) in ath_rx_tap()
611 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; in ath_rx_tap()
613 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; in ath_rx_tap()
615 if (rs->rs_flags & HAL_RX_GI) in ath_rx_tap()
616 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; in ath_rx_tap()
619 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); in ath_rx_tap()
620 if (rs->rs_status & HAL_RXERR_CRC) in ath_rx_tap()
621 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; in ath_rx_tap()
622 /* XXX propagate other error flags from descriptor */ in ath_rx_tap()
623 sc->sc_rx_th.wr_antnoise = nf; in ath_rx_tap()
624 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; in ath_rx_tap()
625 sc->sc_rx_th.wr_antenna = rs->rs_antenna; in ath_rx_tap()
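
A note on the signal fields filled in at the end of ath_rx_tap(): the hardware reports rs_rssi relative to the channel noise floor, which is why wr_antsignal is computed as nf + rs_rssi above. A numeric sketch with assumed example values:

int8_t nf = -96;		/* example channel noise floor, dBm */
uint8_t rssi = 35;		/* example rs->rs_rssi from the descriptor */
int8_t antnoise = nf;		/* wr_antnoise  = -96 dBm */
int8_t antsignal = nf + rssi;	/* wr_antsignal = -61 dBm */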
638 /* XXX recheck MIC to deal w/ chips that lie */ in ath_handle_micerror()
639 /* XXX discard MIC errors on !data frames */ in ath_handle_micerror()
642 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); in ath_handle_micerror()
650 * The mbuf must already be synced, unmapped and removed from bf->bf_m
653 * The mbuf must be consumed by this routine - either passed up the
662 /* XXX TODO: make this an mbuf tag? */ in ath_rx_pkt()
665 struct ieee80211com *ic = &sc->sc_ic; in ath_rx_pkt()
668 struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; in ath_rx_pkt()
674 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); in ath_rx_pkt()
676 /* 802.11 return codes - These aren't specifically errors */ in ath_rx_pkt()
677 if (rs->rs_flags & HAL_RX_GI) in ath_rx_pkt()
678 sc->sc_stats.ast_rx_halfgi++; in ath_rx_pkt()
679 if (rs->rs_flags & HAL_RX_2040) in ath_rx_pkt()
680 sc->sc_stats.ast_rx_2040++; in ath_rx_pkt()
681 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) in ath_rx_pkt()
682 sc->sc_stats.ast_rx_pre_crc_err++; in ath_rx_pkt()
683 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) in ath_rx_pkt()
684 sc->sc_stats.ast_rx_post_crc_err++; in ath_rx_pkt()
685 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) in ath_rx_pkt()
686 sc->sc_stats.ast_rx_decrypt_busy_err++; in ath_rx_pkt()
687 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) in ath_rx_pkt()
688 sc->sc_stats.ast_rx_hi_rx_chain++; in ath_rx_pkt()
689 if (rs->rs_flags & HAL_RX_STBC) in ath_rx_pkt()
690 sc->sc_stats.ast_rx_stbc++; in ath_rx_pkt()
692 if (rs->rs_status != 0) { in ath_rx_pkt()
693 if (rs->rs_status & HAL_RXERR_CRC) in ath_rx_pkt()
694 sc->sc_stats.ast_rx_crcerr++; in ath_rx_pkt()
695 if (rs->rs_status & HAL_RXERR_FIFO) in ath_rx_pkt()
696 sc->sc_stats.ast_rx_fifoerr++; in ath_rx_pkt()
697 if (rs->rs_status & HAL_RXERR_PHY) { in ath_rx_pkt()
698 sc->sc_stats.ast_rx_phyerr++; in ath_rx_pkt()
700 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || in ath_rx_pkt()
701 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { in ath_rx_pkt()
710 if (rs->rs_phyerr < ATH_IOCTL_STATS_NUM_RX_PHYERR) in ath_rx_pkt()
711 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; in ath_rx_pkt()
714 if (rs->rs_status & HAL_RXERR_DECRYPT) { in ath_rx_pkt()
723 * XXX do key cache faulting in ath_rx_pkt()
725 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) in ath_rx_pkt()
727 sc->sc_stats.ast_rx_badcrypt++; in ath_rx_pkt()
730 * Similar to the above - if the failure was a keymiss in ath_rx_pkt()
733 if (rs->rs_status & HAL_RXERR_KEYMISS) { in ath_rx_pkt()
734 sc->sc_stats.ast_rx_keymiss++; in ath_rx_pkt()
737 if (rs->rs_status & HAL_RXERR_MIC) { in ath_rx_pkt()
738 sc->sc_stats.ast_rx_badmic++; in ath_rx_pkt()
743 /* XXX frags and qos frames */ in ath_rx_pkt()
744 len = rs->rs_datalen; in ath_rx_pkt()
748 sc->sc_splitmic ? in ath_rx_pkt()
749 rs->rs_keyix-32 : rs->rs_keyix); in ath_rx_pkt()
752 counter_u64_add(ic->ic_ierrors, 1); in ath_rx_pkt()
757 if (re->m_rxpending != NULL) { in ath_rx_pkt()
758 m_freem(re->m_rxpending); in ath_rx_pkt()
759 re->m_rxpending = NULL; in ath_rx_pkt()
768 (rs->rs_status & sc->sc_monpass)) { in ath_rx_pkt()
770 len = rs->rs_datalen; in ath_rx_pkt()
771 m->m_pkthdr.len = m->m_len = len; in ath_rx_pkt()
778 /* XXX pass MIC errors up for s/w recalculation */ in ath_rx_pkt()
783 len = rs->rs_datalen; in ath_rx_pkt()
784 m->m_len = len; in ath_rx_pkt()
786 if (rs->rs_more) { in ath_rx_pkt()
792 if (re->m_rxpending != NULL) { in ath_rx_pkt()
794 sc->sc_stats.ast_rx_toobig++; in ath_rx_pkt()
795 m_freem(re->m_rxpending); in ath_rx_pkt()
797 m->m_pkthdr.len = len; in ath_rx_pkt()
798 re->m_rxpending = m; in ath_rx_pkt()
801 } else if (re->m_rxpending != NULL) { in ath_rx_pkt()
807 re->m_rxpending->m_next = m; in ath_rx_pkt()
808 re->m_rxpending->m_pkthdr.len += len; in ath_rx_pkt()
809 m = re->m_rxpending; in ath_rx_pkt()
810 re->m_rxpending = NULL; in ath_rx_pkt()
813 * Normal single-descriptor receive; setup packet length. in ath_rx_pkt()
815 m->m_pkthdr.len = len; in ath_rx_pkt()
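
The rs_more handling just above reassembles frames that span more than one RX descriptor: a first part is parked in re->m_rxpending, the final part is chained onto it and the packet header length fixed up, and only then does the combined mbuf continue as a normal receive. A condensed sketch of that state machine, with the statistics counters and monitor-pass special case omitted (return convention simplified for illustration):

if (rs->rs_more) {
	/* First part of a multi-descriptor frame: stash it and wait. */
	if (re->m_rxpending != NULL)
		m_freem(re->m_rxpending);	/* frame too long; drop the old chain */
	m->m_pkthdr.len = len;
	re->m_rxpending = m;
	return (0);				/* not a complete frame yet */
} else if (re->m_rxpending != NULL) {
	/* Final part: append and hand the whole chain up. */
	re->m_rxpending->m_next = m;
	re->m_rxpending->m_pkthdr.len += len;
	m = re->m_rxpending;
	re->m_rxpending = NULL;
} else {
	/* Normal single-descriptor receive. */
	m->m_pkthdr.len = len;
}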
819 * Validate rs->rs_antenna. in ath_rx_pkt()
834 * "mostly" right. (This is a general statement - in ath_rx_pkt()
838 if (rs->rs_antenna >= ATH_IOCTL_STATS_NUM_RX_ANTENNA) { in ath_rx_pkt()
839 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", in ath_rx_pkt()
840 __func__, rs->rs_antenna); in ath_rx_pkt()
844 rs->rs_antenna = 0; /* XXX better than nothing */ in ath_rx_pkt()
862 if (sc->sc_rx_lnamixer) { in ath_rx_pkt()
863 rs->rs_antenna = 0; in ath_rx_pkt()
865 /* Bits 0:1 - the LNA configuration used */ in ath_rx_pkt()
866 rs->rs_antenna |= in ath_rx_pkt()
867 ((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED) in ath_rx_pkt()
870 /* Bit 2 - the external RX antenna switch */ in ath_rx_pkt()
871 if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG) in ath_rx_pkt()
872 rs->rs_antenna |= 0x4; in ath_rx_pkt()
875 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; in ath_rx_pkt()
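
On chips with an LNA diversity mixer the antenna number is synthesized rather than reported directly, as the comments above explain: bits 0:1 of rs_antenna carry the LNA configuration that was used and bit 2 carries the external RX antenna switch setting, giving the eight buckets counted in ast_ant_rx[]. Decoding that packed value back out looks like this (hypothetical helper, included only to make the bit layout explicit):

static void
ath_antenna_decode_sketch(uint8_t antenna, uint8_t *lna_cfg, uint8_t *ext_switch)
{
	*lna_cfg = antenna & 0x3;		/* bits 0:1 - LNA configuration used */
	*ext_switch = (antenna >> 2) & 0x1;	/* bit  2   - external RX antenna switch */
}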
899 sc->sc_stats.ast_rx_tooshort++; in ath_rx_pkt()
909 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_rx_pkt()
910 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; in ath_rx_pkt()
913 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); in ath_rx_pkt()
916 m_adj(m, -IEEE80211_CRC_LEN); in ath_rx_pkt()
925 rs->rs_keyix == HAL_RXKEYIX_INVALID ? in ath_rx_pkt()
926 IEEE80211_KEYIX_NONE : rs->rs_keyix); in ath_rx_pkt()
927 sc->sc_lastrs = rs; in ath_rx_pkt()
929 if (rs->rs_isaggr) in ath_rx_pkt()
930 sc->sc_stats.ast_rx_agg++; in ath_rx_pkt()
933 * Populate the per-chain RSSI values where appropriate. in ath_rx_pkt()
941 IEEE80211_R_TSF_START; /* XXX TODO: validate */ in ath_rx_pkt()
942 rxs.c_rssi = rs->rs_rssi; in ath_rx_pkt()
944 rxs.c_chain = 3; /* XXX TODO: check */ in ath_rx_pkt()
948 rxs.c_rssi_ctl[i] = rs->rs_rssi_ctl[i]; in ath_rx_pkt()
949 rxs.c_rssi_ext[i] = rs->rs_rssi_ext[i]; in ath_rx_pkt()
951 * XXX note: we currently don't track in ath_rx_pkt()
952 * per-chain noisefloor. in ath_rx_pkt()
964 if (ni->ni_flags & IEEE80211_NODE_HT) in ath_rx_pkt()
965 m->m_flags |= M_AMPDU; in ath_rx_pkt()
977 ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgrssi, in ath_rx_pkt()
978 rs->rs_rssi); in ath_rx_pkt()
980 ATH_RSSI(ATH_NODE(ni)->an_node_stats.ns_avgrssi)); in ath_rx_pkt()
991 * frames from our ap when operating in station mode. in ath_rx_pkt()
995 if (ic->ic_opmode == IEEE80211_M_STA && in ath_rx_pkt()
996 rs->rs_keyix != HAL_RXKEYIX_INVALID) in ath_rx_pkt()
1012 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); in ath_rx_pkt()
1013 if (sc->sc_diversity) { in ath_rx_pkt()
1019 if (sc->sc_defant != rs->rs_antenna) { in ath_rx_pkt()
1020 if (++sc->sc_rxotherant >= 3) in ath_rx_pkt()
1021 ath_setdefantenna(sc, rs->rs_antenna); in ath_rx_pkt()
1023 sc->sc_rxotherant = 0; in ath_rx_pkt()
1027 if (sc->sc_dolnadiv) { in ath_rx_pkt()
1031 if (sc->sc_softled) { in ath_rx_pkt()
1034 * heartbeat-style blink when idle. The latter in ath_rx_pkt()
1035 * is mainly for station mode where we depend on in ath_rx_pkt()
1039 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_rx_pkt()
1041 rt->rateCodeToIndex[rs->rs_rate]); in ath_rx_pkt()
1042 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) in ath_rx_pkt()
1047 * Debugging - complain if we didn't NULL the mbuf pointer in ath_rx_pkt()
1051 device_printf(sc->sc_dev, in ath_rx_pkt()
1062 * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
1065 * XXX TODO: then, do all of the RX list management stuff inside
1073 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ in ath_rx_proc()
1074 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) in ath_rx_proc()
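
The PA2DESC() macro above maps a descriptor's physical (DMA) address, as found in a hardware ds_link field, back to its virtual address: the physical offset inside the descriptor DMA block is added to the block's mapped base. The same arithmetic written out as a function (types simplified for the sketch):

static struct ath_desc *
pa_to_desc_sketch(struct ath_softc *sc, bus_addr_t pa)
{
	bus_size_t off = pa - sc->sc_rxdma.dd_desc_paddr;

	return ((struct ath_desc *)((caddr_t)sc->sc_rxdma.dd_desc + off));
}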
1076 struct ath_hal *ah = sc->sc_ah; in ath_rx_proc()
1078 struct ieee80211com *ic = &sc->sc_ic; in ath_rx_proc()
1091 /* XXX we must not hold the ATH_LOCK here */ in ath_rx_proc()
1096 sc->sc_rxproc_cnt++; in ath_rx_proc()
1097 kickpcu = sc->sc_kickpcu; in ath_rx_proc()
1106 nf = ath_hal_getchannoise(ah, sc->sc_curchan); in ath_rx_proc()
1107 sc->sc_stats.ast_rx_noise = nf; in ath_rx_proc()
1112 * TX thread time to also run - otherwise the TX in ath_rx_proc()
1119 bf = TAILQ_FIRST(&sc->sc_rxbuf); in ath_rx_proc()
1120 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ in ath_rx_proc()
1121 device_printf(sc->sc_dev, "%s: no buffer!\n", __func__); in ath_rx_proc()
1126 * this can happen for non-self-linked RX chains in ath_rx_proc()
1128 sc->sc_stats.ast_rx_hitqueueend++; in ath_rx_proc()
1131 m = bf->bf_m; in ath_rx_proc()
1135 * will be no mbuf; try again to re-populate it. in ath_rx_proc()
1137 /* XXX make debug msg */ in ath_rx_proc()
1138 device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__); in ath_rx_proc()
1139 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); in ath_rx_proc()
1142 ds = bf->bf_desc; in ath_rx_proc()
1143 if (ds->ds_link == bf->bf_daddr) { in ath_rx_proc()
1144 /* NB: never process the self-linked entry at the end */ in ath_rx_proc()
1145 sc->sc_stats.ast_rx_hitqueueend++; in ath_rx_proc()
1148 /* XXX sync descriptor memory */ in ath_rx_proc()
1158 * a self-linked list to avoid rx overruns. in ath_rx_proc()
1160 rs = &bf->bf_status.ds_rxstat; in ath_rx_proc()
1162 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); in ath_rx_proc()
1164 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) in ath_rx_proc()
1169 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS)) in ath_rx_proc()
1170 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS, in ath_rx_proc()
1171 sc->sc_rx_statuslen, (char *) ds); in ath_rx_proc()
1177 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); in ath_rx_proc()
1183 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); in ath_rx_proc()
1184 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); in ath_rx_proc()
1185 bf->bf_m = NULL; in ath_rx_proc()
1195 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) { in ath_rx_proc()
1196 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, in ath_rx_proc()
1197 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf, in ath_rx_proc()
1200 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf); in ath_rx_proc()
1207 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf; in ath_rx_proc()
1211 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); in ath_rx_proc()
1213 sc->sc_lastrx = tsf; in ath_rx_proc()
1217 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) in ath_rx_proc()
1218 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); in ath_rx_proc()
1228 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", in ath_rx_proc()
1241 * XXX Has RX DMA stopped enough here to just call in ath_rx_proc()
1243 * XXX Do we need to use the holding buffer to restart in ath_rx_proc()
1251 * Disabled for now - it'd be nice to be able to do in ath_rx_proc()
1258 * XXX can we hold the PCU lock here? in ath_rx_proc()
1261 bf = TAILQ_FIRST(&sc->sc_rxbuf); in ath_rx_proc()
1262 ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP); in ath_rx_proc()
1265 ath_hal_startpcurecv(ah, (!! sc->sc_scanning)); /* re-enable PCU/DMA engine */ in ath_rx_proc()
1268 ath_hal_intrset(ah, sc->sc_imask); in ath_rx_proc()
1269 sc->sc_kickpcu = 0; in ath_rx_proc()
1292 sc->sc_rx.recv_sched(sc, resched); in ath_rx_proc()
1295 sc->sc_rxproc_cnt--; in ath_rx_proc()
1314 if (sc->sc_inreset_cnt > 0) { in ath_legacy_rx_tasklet()
1315 device_printf(sc->sc_dev, in ath_legacy_rx_tasklet()
1336 /* XXX ATH_RX_LOCK_ASSERT(sc); */ in ath_legacy_flush_rxpending()
1338 if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) { in ath_legacy_flush_rxpending()
1339 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); in ath_legacy_flush_rxpending()
1340 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; in ath_legacy_flush_rxpending()
1342 if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) { in ath_legacy_flush_rxpending()
1343 m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); in ath_legacy_flush_rxpending()
1344 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; in ath_legacy_flush_rxpending()
1353 /* XXX ATH_RX_LOCK_ASSERT(sc); */ in ath_legacy_flush_rxholdbf()
1358 * XXX should just verify that bf->bf_m is NULL, as it must in ath_legacy_flush_rxholdbf()
1361 bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf; in ath_legacy_flush_rxholdbf()
1363 if (bf->bf_m != NULL) in ath_legacy_flush_rxholdbf()
1364 m_freem(bf->bf_m); in ath_legacy_flush_rxholdbf()
1365 bf->bf_m = NULL; in ath_legacy_flush_rxholdbf()
1366 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); in ath_legacy_flush_rxholdbf()
1369 sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL; in ath_legacy_flush_rxholdbf()
1371 bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf; in ath_legacy_flush_rxholdbf()
1373 if (bf->bf_m != NULL) in ath_legacy_flush_rxholdbf()
1374 m_freem(bf->bf_m); in ath_legacy_flush_rxholdbf()
1375 bf->bf_m = NULL; in ath_legacy_flush_rxholdbf()
1376 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); in ath_legacy_flush_rxholdbf()
1379 sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL; in ath_legacy_flush_rxholdbf()
1391 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ in ath_legacy_stoprecv()
1392 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) in ath_legacy_stoprecv()
1393 struct ath_hal *ah = sc->sc_ah; in ath_legacy_stoprecv()
1409 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { in ath_legacy_stoprecv()
1413 device_printf(sc->sc_dev, in ath_legacy_stoprecv()
1417 sc->sc_rxlink); in ath_legacy_stoprecv()
1419 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { in ath_legacy_stoprecv()
1420 struct ath_desc *ds = bf->bf_desc; in ath_legacy_stoprecv()
1421 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; in ath_legacy_stoprecv()
1423 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); in ath_legacy_stoprecv()
1424 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) in ath_legacy_stoprecv()
1434 sc->sc_rxlink = NULL; /* just in case */ in ath_legacy_stoprecv()
1441 * XXX TODO: something was calling startrecv without calling
1452 struct ath_hal *ah = sc->sc_ah; in ath_legacy_startrecv()
1458 * XXX should verify these are already all NULL! in ath_legacy_startrecv()
1460 sc->sc_rxlink = NULL; in ath_legacy_startrecv()
1465 * Re-chain all of the buffers in the RX buffer list. in ath_legacy_startrecv()
1467 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { in ath_legacy_startrecv()
1477 bf = TAILQ_FIRST(&sc->sc_rxbuf); in ath_legacy_startrecv()
1478 ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP); in ath_legacy_startrecv()
1481 ath_hal_startpcurecv(ah, (!! sc->sc_scanning)); /* re-enable PCU/DMA engine */ in ath_legacy_startrecv()
1492 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, in ath_legacy_dma_rxsetup()
1504 if (sc->sc_rxdma.dd_desc_len != 0) in ath_legacy_dma_rxteardown()
1505 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); in ath_legacy_dma_rxteardown()
1513 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); in ath_legacy_recv_sched()
1521 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); in ath_legacy_recv_sched_queue()
1530 * XXX this should be changed to properly support the in ath_recv_setup_legacy()
1533 sc->sc_rx_statuslen = sizeof(struct ath_desc); in ath_recv_setup_legacy()
1535 sc->sc_rx.recv_start = ath_legacy_startrecv; in ath_recv_setup_legacy()
1536 sc->sc_rx.recv_stop = ath_legacy_stoprecv; in ath_recv_setup_legacy()
1537 sc->sc_rx.recv_flush = ath_legacy_flushrecv; in ath_recv_setup_legacy()
1538 sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet; in ath_recv_setup_legacy()
1539 sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init; in ath_recv_setup_legacy()
1541 sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup; in ath_recv_setup_legacy()
1542 sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown; in ath_recv_setup_legacy()
1543 sc->sc_rx.recv_sched = ath_legacy_recv_sched; in ath_recv_setup_legacy()
1544 sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue; in ath_recv_setup_legacy()
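
ath_recv_setup_legacy() installs the legacy (pre-EDMA) RX routines into the driver's RX method table; the EDMA RX code installs its own set elsewhere, and callers simply dispatch through the table (e.g. sc->sc_rx.recv_sched(sc, resched) earlier in this listing). A sketch of that function-pointer dispatch pattern, with the struct shape and signatures assumed for illustration rather than copied from the driver headers:

struct ath_rx_methods_sketch {
	int	(*recv_setup)(struct ath_softc *sc);
	int	(*recv_teardown)(struct ath_softc *sc);
	int	(*recv_start)(struct ath_softc *sc);
	void	(*recv_stop)(struct ath_softc *sc, int dodelay);
	void	(*recv_flush)(struct ath_softc *sc);
	void	(*recv_tasklet)(void *arg, int npending);
	int	(*recv_rxbuf_init)(struct ath_softc *sc, struct ath_buf *bf);
	void	(*recv_sched)(struct ath_softc *sc, int dosched);
	void	(*recv_sched_queue)(struct ath_softc *sc, HAL_RX_QUEUE q,
		    int dosched);
};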