/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.
 * When staggering beacons 4 is probably a good max as otherwise
 * the beacons become very closely spaced and there is limited
 * time for cab q traffic to go out.  You can burst beacons
 * instead but that is not good for stations in power save and
 * at some point you really want another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static void	ath_start(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_mode_init(struct ath_softc *);
static void	ath_setslottime(struct ath_softc *);
static void	ath_updateslot(struct ifnet *);
static int	ath_beaconq_setup(struct ath_hal *);
static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void	ath_beacon_update(struct ieee80211vap *, int item);
static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
			struct ieee80211vap *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_free(struct ath_softc *);
static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
			int subtype, int rssi, int nf);
static void	ath_setdefantenna(struct ath_softc *, u_int);
static void	ath_rx_proc(struct ath_softc *sc, int);
static void	ath_rx_tasklet(void *, int);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static int	ath_wme_update(struct ieee80211com *);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_stoprecv(struct ath_softc *, int);
static int	ath_startrecv(struct ath_softc *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

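/*
 * The TDMA code below keeps its running averages in 1/1024
 * (TDMA_EP_MULTIPLIER) fixed-point units: TDMA_IN() scales a sample
 * up, TDMA_SAMPLE()/TDMA_LPF() fold it into an exponential low-pass
 * filter of length TDMA_LPF_LEN, and TDMA_AVG() converts back to
 * integer units with rounding (TDMA_EP_RND).  TDMA_DUMMY_MARKER
 * denotes a filter that has not yet been seeded with a first sample.
 */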
#ifdef IEEE80211_SUPPORT_TDMA
static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
		    u_int32_t bintval);
static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
		    const struct ieee80211_tdma_state *tdma);
static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void	ath_tdma_update(struct ieee80211_node *ni,
		    const struct ieee80211_tdma_param *tdma, int);
static void	ath_tdma_beacon_send(struct ath_softc *sc,
		    struct ieee80211vap *vap);

#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static	int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20	(HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
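	 * Clamp it at ATH_KEYMAX, the largest number of key cache
	 * slots the driver can track.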
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
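	 * A CAB (content-after-beacon) queue is also set up to carry
	 * frames buffered for power-save stations, which are sent
	 * right after each beacon.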
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
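		 * If it does not (or split mode cannot be disabled)
		 * record that in sc_splitmic so tx and rx MIC keys
		 * are given separate key cache slots.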
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
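	 * (bursting, bssid mask/match, TSF adjust, self-linked final
	 * rx descriptors, rx timestamp width, fast frames, turbo
	 * and TDMA).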
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break anything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
						/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
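	 * The workaround only matters (and is only enabled) when
	 * running on an SMP machine (mp_ncpus > 1).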
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
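 * The search below therefore prefers a slot whose neighbouring
 * slots (modulo ATH_BCBUF) are also unused.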
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
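	/* NB: for now only hostap/mbss vaps get a bssid mask slot assigned here */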
*/ 105359aa14a9SRui Paulo if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1054b032f27cSSam Leffler assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1055b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1056b032f27cSSam Leffler } 1057b032f27cSSam Leffler 1058b032f27cSSam Leffler vap = &avp->av_vap; 1059b032f27cSSam Leffler /* XXX can't hold mutex across if_alloc */ 1060b032f27cSSam Leffler ATH_UNLOCK(sc); 1061b032f27cSSam Leffler error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1062b032f27cSSam Leffler bssid, mac); 1063b032f27cSSam Leffler ATH_LOCK(sc); 1064b032f27cSSam Leffler if (error != 0) { 1065b032f27cSSam Leffler device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1066b032f27cSSam Leffler __func__, error); 1067b032f27cSSam Leffler goto bad2; 1068b032f27cSSam Leffler } 1069b032f27cSSam Leffler 1070b032f27cSSam Leffler /* h/w crypto support */ 1071b032f27cSSam Leffler vap->iv_key_alloc = ath_key_alloc; 1072b032f27cSSam Leffler vap->iv_key_delete = ath_key_delete; 1073b032f27cSSam Leffler vap->iv_key_set = ath_key_set; 1074b032f27cSSam Leffler vap->iv_key_update_begin = ath_key_update_begin; 1075b032f27cSSam Leffler vap->iv_key_update_end = ath_key_update_end; 1076b032f27cSSam Leffler 1077b032f27cSSam Leffler /* override various methods */ 1078b032f27cSSam Leffler avp->av_recv_mgmt = vap->iv_recv_mgmt; 1079b032f27cSSam Leffler vap->iv_recv_mgmt = ath_recv_mgmt; 1080b032f27cSSam Leffler vap->iv_reset = ath_reset_vap; 1081b032f27cSSam Leffler vap->iv_update_beacon = ath_beacon_update; 1082b032f27cSSam Leffler avp->av_newstate = vap->iv_newstate; 1083b032f27cSSam Leffler vap->iv_newstate = ath_newstate; 1084b032f27cSSam Leffler avp->av_bmiss = vap->iv_bmiss; 1085b032f27cSSam Leffler vap->iv_bmiss = ath_bmiss_vap; 1086b032f27cSSam Leffler 10879be25f4aSAdrian Chadd /* Set default parameters */ 10889be25f4aSAdrian Chadd 10899be25f4aSAdrian Chadd /* 10909be25f4aSAdrian Chadd * Anything earlier than some AR9300 series MACs don't 10919be25f4aSAdrian Chadd * support a smaller MPDU density. 10929be25f4aSAdrian Chadd */ 10939be25f4aSAdrian Chadd vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 10949be25f4aSAdrian Chadd /* 10959be25f4aSAdrian Chadd * All NICs can handle the maximum size, however 10969be25f4aSAdrian Chadd * AR5416 based MACs can only TX aggregates w/ RTS 10979be25f4aSAdrian Chadd * protection when the total aggregate size is <= 8k. 10989be25f4aSAdrian Chadd * However, for now that's enforced by the TX path. 10999be25f4aSAdrian Chadd */ 11009be25f4aSAdrian Chadd vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 11019be25f4aSAdrian Chadd 1102b032f27cSSam Leffler avp->av_bslot = -1; 1103b032f27cSSam Leffler if (needbeacon) { 1104b032f27cSSam Leffler /* 1105b032f27cSSam Leffler * Allocate beacon state and setup the q for buffered 1106b032f27cSSam Leffler * multicast frames. We know a beacon buffer is 1107b032f27cSSam Leffler * available because we checked above. 1108b032f27cSSam Leffler */ 11096b349e5aSAdrian Chadd avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 11106b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1111b032f27cSSam Leffler if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1112b032f27cSSam Leffler /* 1113b032f27cSSam Leffler * Assign the vap to a beacon xmit slot. As above 1114b032f27cSSam Leffler * this cannot fail to find a free one. 
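 * assign_bslot() prefers a slot whose neighbours (mod ATH_BCBUF)
 * are also free so staggered beacons end up spread apart.  Worked
 * example, assuming ATH_BCBUF == 4 with slot 1 already taken:
 * slot 0 is rejected (slot 1 busy), slot 2 is rejected (slot 1
 * busy), and slot 3 is returned since slots 2 and 0 are both
 * free.  If no such "double" slot exists, the last free slot seen
 * is used instead.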
1115b032f27cSSam Leffler */ 1116b032f27cSSam Leffler avp->av_bslot = assign_bslot(sc); 1117b032f27cSSam Leffler KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1118b032f27cSSam Leffler ("beacon slot %u not empty", avp->av_bslot)); 1119b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = vap; 1120b032f27cSSam Leffler sc->sc_nbcnvaps++; 1121b032f27cSSam Leffler } 1122b032f27cSSam Leffler if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1123b032f27cSSam Leffler /* 1124b032f27cSSam Leffler * Multple vaps are to transmit beacons and we 1125b032f27cSSam Leffler * have h/w support for TSF adjusting; enable 1126b032f27cSSam Leffler * use of staggered beacons. 1127b032f27cSSam Leffler */ 1128b032f27cSSam Leffler sc->sc_stagbeacons = 1; 1129b032f27cSSam Leffler } 1130b032f27cSSam Leffler ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1131b032f27cSSam Leffler } 1132b032f27cSSam Leffler 1133b032f27cSSam Leffler ic->ic_opmode = ic_opmode; 1134b032f27cSSam Leffler if (opmode != IEEE80211_M_WDS) { 1135b032f27cSSam Leffler sc->sc_nvaps++; 1136b032f27cSSam Leffler if (opmode == IEEE80211_M_STA) 1137b032f27cSSam Leffler sc->sc_nstavaps++; 1138fe0dd789SSam Leffler if (opmode == IEEE80211_M_MBSS) 1139fe0dd789SSam Leffler sc->sc_nmeshvaps++; 1140b032f27cSSam Leffler } 1141b032f27cSSam Leffler switch (ic_opmode) { 1142b032f27cSSam Leffler case IEEE80211_M_IBSS: 1143b032f27cSSam Leffler sc->sc_opmode = HAL_M_IBSS; 1144b032f27cSSam Leffler break; 1145b032f27cSSam Leffler case IEEE80211_M_STA: 1146b032f27cSSam Leffler sc->sc_opmode = HAL_M_STA; 1147b032f27cSSam Leffler break; 1148b032f27cSSam Leffler case IEEE80211_M_AHDEMO: 1149584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 115010ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) { 115110ad9a77SSam Leffler sc->sc_tdma = 1; 115210ad9a77SSam Leffler /* NB: disable tsf adjust */ 115310ad9a77SSam Leffler sc->sc_stagbeacons = 0; 115410ad9a77SSam Leffler } 115510ad9a77SSam Leffler /* 115610ad9a77SSam Leffler * NB: adhoc demo mode is a pseudo mode; to the hal it's 115710ad9a77SSam Leffler * just ap mode. 115810ad9a77SSam Leffler */ 115910ad9a77SSam Leffler /* fall thru... */ 116010ad9a77SSam Leffler #endif 1161b032f27cSSam Leffler case IEEE80211_M_HOSTAP: 116259aa14a9SRui Paulo case IEEE80211_M_MBSS: 1163b032f27cSSam Leffler sc->sc_opmode = HAL_M_HOSTAP; 1164b032f27cSSam Leffler break; 1165b032f27cSSam Leffler case IEEE80211_M_MONITOR: 1166b032f27cSSam Leffler sc->sc_opmode = HAL_M_MONITOR; 1167b032f27cSSam Leffler break; 1168b032f27cSSam Leffler default: 1169b032f27cSSam Leffler /* XXX should not happen */ 1170b032f27cSSam Leffler break; 1171b032f27cSSam Leffler } 1172b032f27cSSam Leffler if (sc->sc_hastsfadd) { 1173b032f27cSSam Leffler /* 1174b032f27cSSam Leffler * Configure whether or not TSF adjust should be done. 1175b032f27cSSam Leffler */ 1176b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1177b032f27cSSam Leffler } 117810ad9a77SSam Leffler if (flags & IEEE80211_CLONE_NOBEACONS) { 117910ad9a77SSam Leffler /* 118010ad9a77SSam Leffler * Enable s/w beacon miss handling. 
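 * With IEEE80211_CLONE_NOBEACONS net80211 runs its own beacon-miss
 * timer for the vap (IEEE80211_FEXT_SWBMISS), so the hardware
 * BMISS interrupt is not relied upon and ath_bmiss_vap() skips its
 * phantom-bmiss sanity check for such vaps.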
118110ad9a77SSam Leffler */ 118210ad9a77SSam Leffler sc->sc_swbmiss = 1; 118310ad9a77SSam Leffler } 1184b032f27cSSam Leffler ATH_UNLOCK(sc); 1185b032f27cSSam Leffler 1186b032f27cSSam Leffler /* complete setup */ 1187b032f27cSSam Leffler ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1188b032f27cSSam Leffler return vap; 1189b032f27cSSam Leffler bad2: 1190b032f27cSSam Leffler reclaim_address(sc, mac); 1191b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1192b032f27cSSam Leffler bad: 1193b032f27cSSam Leffler free(avp, M_80211_VAP); 1194b032f27cSSam Leffler ATH_UNLOCK(sc); 1195b032f27cSSam Leffler return NULL; 1196b032f27cSSam Leffler } 1197b032f27cSSam Leffler 1198b032f27cSSam Leffler static void 1199b032f27cSSam Leffler ath_vap_delete(struct ieee80211vap *vap) 1200b032f27cSSam Leffler { 1201b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 1202b032f27cSSam Leffler struct ifnet *ifp = ic->ic_ifp; 1203b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 1204b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 1205b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 1206b032f27cSSam Leffler 1207f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1208b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1209b032f27cSSam Leffler /* 1210b032f27cSSam Leffler * Quiesce the hardware while we remove the vap. In 1211b032f27cSSam Leffler * particular we need to reclaim all references to 1212b032f27cSSam Leffler * the vap state by any frames pending on the tx queues. 1213b032f27cSSam Leffler */ 1214b032f27cSSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 1215517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1216517526efSAdrian Chadd /* XXX Do all frames from all vaps/nodes need draining here? */ 12179a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* stop recv side */ 1218b032f27cSSam Leffler } 1219b032f27cSSam Leffler 1220b032f27cSSam Leffler ieee80211_vap_detach(vap); 122116d4de92SAdrian Chadd 122216d4de92SAdrian Chadd /* 122316d4de92SAdrian Chadd * XXX Danger Will Robinson! Danger! 122416d4de92SAdrian Chadd * 122516d4de92SAdrian Chadd * Because ieee80211_vap_detach() can queue a frame (the station 122616d4de92SAdrian Chadd * diassociate message?) after we've drained the TXQ and 122716d4de92SAdrian Chadd * flushed the software TXQ, we will end up with a frame queued 122816d4de92SAdrian Chadd * to a node whose vap is about to be freed. 122916d4de92SAdrian Chadd * 123016d4de92SAdrian Chadd * To work around this, flush the hardware/software again. 123116d4de92SAdrian Chadd * This may be racy - the ath task may be running and the packet 123216d4de92SAdrian Chadd * may be being scheduled between sw->hw txq. Tsk. 123316d4de92SAdrian Chadd * 123416d4de92SAdrian Chadd * TODO: figure out why a new node gets allocated somewhere around 123516d4de92SAdrian Chadd * here (after the ath_tx_swq() call; and after an ath_stop_locked() 123616d4de92SAdrian Chadd * call!) 123716d4de92SAdrian Chadd */ 123816d4de92SAdrian Chadd 123916d4de92SAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); 124016d4de92SAdrian Chadd 1241b032f27cSSam Leffler ATH_LOCK(sc); 1242b032f27cSSam Leffler /* 1243b032f27cSSam Leffler * Reclaim beacon state. Note this must be done before 1244b032f27cSSam Leffler * the vap instance is reclaimed as we may have a reference 1245b032f27cSSam Leffler * to it in the buffer for the beacon frame. 
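 * For reference, the overall teardown order in this function is
 * roughly: quiesce the chip (disable interrupts, drain tx, stop
 * rx) while the interface is RUNNING, call ieee80211_vap_detach(),
 * do a second ath_draintxq() to catch frames queued by the detach
 * itself, then (here) release the beacon slot/buffer and the
 * per-vap mcast queue, fix up the vap counters, and finally
 * restart rx/beacons/interrupts if other vaps are still active.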
1246b032f27cSSam Leffler */ 1247b032f27cSSam Leffler if (avp->av_bcbuf != NULL) { 1248b032f27cSSam Leffler if (avp->av_bslot != -1) { 1249b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = NULL; 1250b032f27cSSam Leffler sc->sc_nbcnvaps--; 1251b032f27cSSam Leffler } 1252b032f27cSSam Leffler ath_beacon_return(sc, avp->av_bcbuf); 1253b032f27cSSam Leffler avp->av_bcbuf = NULL; 1254b032f27cSSam Leffler if (sc->sc_nbcnvaps == 0) { 1255b032f27cSSam Leffler sc->sc_stagbeacons = 0; 1256b032f27cSSam Leffler if (sc->sc_hastsfadd) 1257b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, 0); 1258b032f27cSSam Leffler } 1259b032f27cSSam Leffler /* 1260b032f27cSSam Leffler * Reclaim any pending mcast frames for the vap. 1261b032f27cSSam Leffler */ 1262b032f27cSSam Leffler ath_tx_draintxq(sc, &avp->av_mcastq); 1263b032f27cSSam Leffler ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1264b032f27cSSam Leffler } 1265b032f27cSSam Leffler /* 1266b032f27cSSam Leffler * Update bookkeeping. 1267b032f27cSSam Leffler */ 1268b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_STA) { 1269b032f27cSSam Leffler sc->sc_nstavaps--; 1270b032f27cSSam Leffler if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1271b032f27cSSam Leffler sc->sc_swbmiss = 0; 127259aa14a9SRui Paulo } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 127359aa14a9SRui Paulo vap->iv_opmode == IEEE80211_M_MBSS) { 1274b032f27cSSam Leffler reclaim_address(sc, vap->iv_myaddr); 1275b032f27cSSam Leffler ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1276fe0dd789SSam Leffler if (vap->iv_opmode == IEEE80211_M_MBSS) 1277fe0dd789SSam Leffler sc->sc_nmeshvaps--; 1278b032f27cSSam Leffler } 1279b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_WDS) 1280b032f27cSSam Leffler sc->sc_nvaps--; 1281584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 128210ad9a77SSam Leffler /* TDMA operation ceases when the last vap is destroyed */ 128310ad9a77SSam Leffler if (sc->sc_tdma && sc->sc_nvaps == 0) { 128410ad9a77SSam Leffler sc->sc_tdma = 0; 128510ad9a77SSam Leffler sc->sc_swbmiss = 0; 128610ad9a77SSam Leffler } 128710ad9a77SSam Leffler #endif 1288b032f27cSSam Leffler free(avp, M_80211_VAP); 1289b032f27cSSam Leffler 1290b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1291b032f27cSSam Leffler /* 1292b032f27cSSam Leffler * Restart rx+tx machines if still running (RUNNING will 1293b032f27cSSam Leffler * be reset if we just destroyed the last vap). 
1294b032f27cSSam Leffler */ 1295b032f27cSSam Leffler if (ath_startrecv(sc) != 0) 1296b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 1297b032f27cSSam Leffler __func__); 1298c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 1299c89b957aSSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 1300c89b957aSSam Leffler if (sc->sc_tdma) 1301c89b957aSSam Leffler ath_tdma_config(sc, NULL); 1302c89b957aSSam Leffler else 1303c89b957aSSam Leffler #endif 1304b032f27cSSam Leffler ath_beacon_config(sc, NULL); 1305c89b957aSSam Leffler } 1306b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1307b032f27cSSam Leffler } 130816d4de92SAdrian Chadd ATH_UNLOCK(sc); 1309b032f27cSSam Leffler } 1310b032f27cSSam Leffler 13115591b213SSam Leffler void 13125591b213SSam Leffler ath_suspend(struct ath_softc *sc) 13135591b213SSam Leffler { 1314fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1315d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 13165591b213SSam Leffler 1317c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1318c42a7b7eSSam Leffler __func__, ifp->if_flags); 13195591b213SSam Leffler 1320d3ac945bSSam Leffler sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1321d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) 13225591b213SSam Leffler ath_stop(ifp); 1323d3ac945bSSam Leffler else 1324d3ac945bSSam Leffler ieee80211_suspend_all(ic); 1325d3ac945bSSam Leffler /* 1326d3ac945bSSam Leffler * NB: don't worry about putting the chip in low power 1327d3ac945bSSam Leffler * mode; pci will power off our socket on suspend and 1328f29b8b7fSWarner Losh * CardBus detaches the device. 1329d3ac945bSSam Leffler */ 1330d3ac945bSSam Leffler } 1331d3ac945bSSam Leffler 1332d3ac945bSSam Leffler /* 1333d3ac945bSSam Leffler * Reset the key cache since some parts do not reset the 1334d3ac945bSSam Leffler * contents on resume. First we clear all entries, then 1335d3ac945bSSam Leffler * re-load keys that the 802.11 layer assumes are setup 1336d3ac945bSSam Leffler * in h/w. 1337d3ac945bSSam Leffler */ 1338d3ac945bSSam Leffler static void 1339d3ac945bSSam Leffler ath_reset_keycache(struct ath_softc *sc) 1340d3ac945bSSam Leffler { 1341d3ac945bSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1342d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1343d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1344d3ac945bSSam Leffler int i; 1345d3ac945bSSam Leffler 1346d3ac945bSSam Leffler for (i = 0; i < sc->sc_keymax; i++) 1347d3ac945bSSam Leffler ath_hal_keyreset(ah, i); 1348d3ac945bSSam Leffler ieee80211_crypto_reload_keys(ic); 13495591b213SSam Leffler } 13505591b213SSam Leffler 13515591b213SSam Leffler void 13525591b213SSam Leffler ath_resume(struct ath_softc *sc) 13535591b213SSam Leffler { 1354fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1355d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1356d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1357d3ac945bSSam Leffler HAL_STATUS status; 13585591b213SSam Leffler 1359c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1360c42a7b7eSSam Leffler __func__, ifp->if_flags); 13615591b213SSam Leffler 1362d3ac945bSSam Leffler /* 1363d3ac945bSSam Leffler * Must reset the chip before we reload the 1364d3ac945bSSam Leffler * keycache as we were powered down on suspend. 1365d3ac945bSSam Leffler */ 1366054d7b69SSam Leffler ath_hal_reset(ah, sc->sc_opmode, 1367054d7b69SSam Leffler sc->sc_curchan != NULL ? 
sc->sc_curchan : ic->ic_curchan, 1368054d7b69SSam Leffler AH_FALSE, &status); 1369d3ac945bSSam Leffler ath_reset_keycache(sc); 13707e5eb44dSAdrian Chadd 13717e5eb44dSAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 13727e5eb44dSAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 13737e5eb44dSAdrian Chadd 1374a497cd88SAdrian Chadd /* Restore the LED configuration */ 1375a497cd88SAdrian Chadd ath_led_config(sc); 1376a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_INIT); 1377a497cd88SAdrian Chadd 1378d3ac945bSSam Leffler if (sc->sc_resume_up) { 1379d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) { 1380fc74a9f9SBrooks Davis ath_init(sc); 1381a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_RUN); 1382394f34a5SSam Leffler /* 1383394f34a5SSam Leffler * Program the beacon registers using the last rx'd 1384394f34a5SSam Leffler * beacon frame and enable sync on the next beacon 1385394f34a5SSam Leffler * we see. This should handle the case where we 1386394f34a5SSam Leffler * wakeup and find the same AP and also the case where 1387394f34a5SSam Leffler * we wakeup and need to roam. For the latter we 1388394f34a5SSam Leffler * should get bmiss events that trigger a roam. 1389394f34a5SSam Leffler */ 1390394f34a5SSam Leffler ath_beacon_config(sc, NULL); 1391394f34a5SSam Leffler sc->sc_syncbeacon = 1; 1392d3ac945bSSam Leffler } else 1393d3ac945bSSam Leffler ieee80211_resume_all(ic); 13945591b213SSam Leffler } 13952fd9aabbSAdrian Chadd 13962fd9aabbSAdrian Chadd /* XXX beacons ? */ 13976b59f5e3SSam Leffler } 13985591b213SSam Leffler 13995591b213SSam Leffler void 14005591b213SSam Leffler ath_shutdown(struct ath_softc *sc) 14015591b213SSam Leffler { 1402fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 14035591b213SSam Leffler 1404c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1405c42a7b7eSSam Leffler __func__, ifp->if_flags); 14065591b213SSam Leffler 14075591b213SSam Leffler ath_stop(ifp); 1408d3ac945bSSam Leffler /* NB: no point powering down chip as we're about to reboot */ 14095591b213SSam Leffler } 14105591b213SSam Leffler 1411c42a7b7eSSam Leffler /* 1412c42a7b7eSSam Leffler * Interrupt handler. Most of the actual processing is deferred. 1413c42a7b7eSSam Leffler */ 14145591b213SSam Leffler void 14155591b213SSam Leffler ath_intr(void *arg) 14165591b213SSam Leffler { 14175591b213SSam Leffler struct ath_softc *sc = arg; 1418fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 14195591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 14206f5fe81eSAdrian Chadd HAL_INT status = 0; 14218f939e79SAdrian Chadd uint32_t txqs; 14225591b213SSam Leffler 1423ef27340cSAdrian Chadd /* 1424ef27340cSAdrian Chadd * If we're inside a reset path, just print a warning and 1425ef27340cSAdrian Chadd * clear the ISR. The reset routine will finish it for us. 
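 * (sc_inreset_cnt is raised via ath_reset_grablock() under
 * ATH_PCU_LOCK at the start of a reset; interrupts are only
 * re-enabled once ath_reset() drops the count again and
 * reprograms sc_imask -- see ath_reset() below.)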
1426ef27340cSAdrian Chadd */ 1427ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1428ef27340cSAdrian Chadd if (sc->sc_inreset_cnt) { 1429ef27340cSAdrian Chadd HAL_INT status; 1430ef27340cSAdrian Chadd ath_hal_getisr(ah, &status); /* clear ISR */ 1431ef27340cSAdrian Chadd ath_hal_intrset(ah, 0); /* disable further intr's */ 1432ef27340cSAdrian Chadd DPRINTF(sc, ATH_DEBUG_ANY, 1433ef27340cSAdrian Chadd "%s: in reset, ignoring: status=0x%x\n", 1434ef27340cSAdrian Chadd __func__, status); 1435ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1436ef27340cSAdrian Chadd return; 1437ef27340cSAdrian Chadd } 1438ef27340cSAdrian Chadd 14395591b213SSam Leffler if (sc->sc_invalid) { 14405591b213SSam Leffler /* 1441b58b3803SSam Leffler * The hardware is not ready/present, don't touch anything. 1442b58b3803SSam Leffler * Note this can happen early on if the IRQ is shared. 14435591b213SSam Leffler */ 1444c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1445ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14465591b213SSam Leffler return; 14475591b213SSam Leffler } 1448ef27340cSAdrian Chadd if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1449ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1450fdd758d4SSam Leffler return; 1451ef27340cSAdrian Chadd } 1452ef27340cSAdrian Chadd 145368e8e04eSSam Leffler if ((ifp->if_flags & IFF_UP) == 0 || 145468e8e04eSSam Leffler (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 145568e8e04eSSam Leffler HAL_INT status; 145668e8e04eSSam Leffler 1457c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1458c42a7b7eSSam Leffler __func__, ifp->if_flags); 14595591b213SSam Leffler ath_hal_getisr(ah, &status); /* clear ISR */ 14605591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable further intr's */ 1461ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14625591b213SSam Leffler return; 14635591b213SSam Leffler } 1464ef27340cSAdrian Chadd 1465c42a7b7eSSam Leffler /* 1466c42a7b7eSSam Leffler * Figure out the reason(s) for the interrupt. Note 1467c42a7b7eSSam Leffler * that the hal returns a pseudo-ISR that may include 1468c42a7b7eSSam Leffler * bits we haven't explicitly enabled so we mask the 1469c42a7b7eSSam Leffler * value to insure we only process bits we requested. 1470c42a7b7eSSam Leffler */ 14715591b213SSam Leffler ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1472c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1473f52d3452SAdrian Chadd CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 147431fdf3d6SAdrian Chadd #ifdef ATH_KTR_INTR_DEBUG 1475f52d3452SAdrian Chadd CTR5(ATH_KTR_INTR, 1476f52d3452SAdrian Chadd "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1477f52d3452SAdrian Chadd ah->ah_intrstate[0], 1478f52d3452SAdrian Chadd ah->ah_intrstate[1], 1479f52d3452SAdrian Chadd ah->ah_intrstate[2], 1480f52d3452SAdrian Chadd ah->ah_intrstate[3], 1481f52d3452SAdrian Chadd ah->ah_intrstate[6]); 148231fdf3d6SAdrian Chadd #endif 1483ecddff40SSam Leffler status &= sc->sc_imask; /* discard unasked for bits */ 14846f5fe81eSAdrian Chadd 14856f5fe81eSAdrian Chadd /* Short-circuit un-handled interrupts */ 1486ef27340cSAdrian Chadd if (status == 0x0) { 1487ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14886f5fe81eSAdrian Chadd return; 1489ef27340cSAdrian Chadd } 14906f5fe81eSAdrian Chadd 1491ef27340cSAdrian Chadd /* 1492ef27340cSAdrian Chadd * Take a note that we're inside the interrupt handler, so 1493ef27340cSAdrian Chadd * the reset routines know to wait. 
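 * sc_intr_cnt is one of the counters polled by
 * ath_txrx_stop_locked(); a reset racing with this handler will
 * msleep() a tick at a time until the count is dropped again at
 * the bottom of this function.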
1494ef27340cSAdrian Chadd */ 1495ef27340cSAdrian Chadd sc->sc_intr_cnt++; 1496ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1497ef27340cSAdrian Chadd 1498ef27340cSAdrian Chadd /* 1499ef27340cSAdrian Chadd * Handle the interrupt. We won't run concurrent with the reset 1500ef27340cSAdrian Chadd * or channel change routines as they'll wait for sc_intr_cnt 1501ef27340cSAdrian Chadd * to be 0 before continuing. 1502ef27340cSAdrian Chadd */ 15035591b213SSam Leffler if (status & HAL_INT_FATAL) { 15045591b213SSam Leffler sc->sc_stats.ast_hardware++; 15055591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable intr's until reset */ 150616c8acaaSSam Leffler ath_fatal_proc(sc, 0); 15075591b213SSam Leffler } else { 1508c42a7b7eSSam Leffler if (status & HAL_INT_SWBA) { 1509c42a7b7eSSam Leffler /* 1510c42a7b7eSSam Leffler * Software beacon alert--time to send a beacon. 1511c42a7b7eSSam Leffler * Handle beacon transmission directly; deferring 1512c42a7b7eSSam Leffler * this is too slow to meet timing constraints 1513c42a7b7eSSam Leffler * under load. 1514c42a7b7eSSam Leffler */ 1515584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 151610ad9a77SSam Leffler if (sc->sc_tdma) { 151710ad9a77SSam Leffler if (sc->sc_tdmaswba == 0) { 151810ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 151910ad9a77SSam Leffler struct ieee80211vap *vap = 152010ad9a77SSam Leffler TAILQ_FIRST(&ic->ic_vaps); 152110ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); 152210ad9a77SSam Leffler sc->sc_tdmaswba = 152310ad9a77SSam Leffler vap->iv_tdma->tdma_bintval; 152410ad9a77SSam Leffler } else 152510ad9a77SSam Leffler sc->sc_tdmaswba--; 152610ad9a77SSam Leffler } else 152710ad9a77SSam Leffler #endif 1528339ccfb3SSam Leffler { 1529c42a7b7eSSam Leffler ath_beacon_proc(sc, 0); 1530339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 1531339ccfb3SSam Leffler /* 1532339ccfb3SSam Leffler * Schedule the rx taskq in case there's no 1533339ccfb3SSam Leffler * traffic so any frames held on the staging 1534339ccfb3SSam Leffler * queue are aged and potentially flushed. 1535339ccfb3SSam Leffler */ 1536339ccfb3SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1537339ccfb3SSam Leffler #endif 1538339ccfb3SSam Leffler } 1539c42a7b7eSSam Leffler } 15405591b213SSam Leffler if (status & HAL_INT_RXEOL) { 15418f939e79SAdrian Chadd int imask; 1542f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1543ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 15445591b213SSam Leffler /* 15455591b213SSam Leffler * NB: the hardware should re-read the link when 15465591b213SSam Leffler * RXE bit is written, but it doesn't work at 15475591b213SSam Leffler * least on older hardware revs. 15485591b213SSam Leffler */ 15495591b213SSam Leffler sc->sc_stats.ast_rxeol++; 155073f895fcSAdrian Chadd /* 155173f895fcSAdrian Chadd * Disable RXEOL/RXORN - prevent an interrupt 155273f895fcSAdrian Chadd * storm until the PCU logic can be reset. 15531fdadc0fSAdrian Chadd * In case the interface is reset some other 15541fdadc0fSAdrian Chadd * way before "sc_kickpcu" is called, don't 15551fdadc0fSAdrian Chadd * modify sc_imask - that way if it is reset 15561fdadc0fSAdrian Chadd * by a call to ath_reset() somehow, the 15571fdadc0fSAdrian Chadd * interrupt mask will be correctly reprogrammed. 
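 * The recovery sequence is therefore: mask RXEOL/RXORN in a local
 * copy of the mask only, flag sc_kickpcu, and queue the rx task;
 * the rx path is then expected to restart the PCU receive logic
 * and restore the full sc_imask once it has drained whatever is
 * pending.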
155873f895fcSAdrian Chadd */ 15598f939e79SAdrian Chadd imask = sc->sc_imask; 15601fdadc0fSAdrian Chadd imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 15611fdadc0fSAdrian Chadd ath_hal_intrset(ah, imask); 15621fdadc0fSAdrian Chadd /* 15638f939e79SAdrian Chadd * Only blank sc_rxlink if we've not yet kicked 15648f939e79SAdrian Chadd * the PCU. 15658f939e79SAdrian Chadd * 15668f939e79SAdrian Chadd * This isn't entirely correct - the correct solution 15678f939e79SAdrian Chadd * would be to have a PCU lock and engage that for 15688f939e79SAdrian Chadd * the duration of the PCU fiddling; which would include 15698f939e79SAdrian Chadd * running the RX process. Otherwise we could end up 15708f939e79SAdrian Chadd * messing up the RX descriptor chain and making the 15718f939e79SAdrian Chadd * RX desc list much shorter. 15728f939e79SAdrian Chadd */ 15738f939e79SAdrian Chadd if (! sc->sc_kickpcu) 15748f939e79SAdrian Chadd sc->sc_rxlink = NULL; 15758f939e79SAdrian Chadd sc->sc_kickpcu = 1; 15768f939e79SAdrian Chadd /* 15771fdadc0fSAdrian Chadd * Enqueue an RX proc, to handled whatever 15781fdadc0fSAdrian Chadd * is in the RX queue. 15791fdadc0fSAdrian Chadd * This will then kick the PCU. 15801fdadc0fSAdrian Chadd */ 15811fdadc0fSAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1582ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 15835591b213SSam Leffler } 15845591b213SSam Leffler if (status & HAL_INT_TXURN) { 15855591b213SSam Leffler sc->sc_stats.ast_txurn++; 15865591b213SSam Leffler /* bump tx trigger level */ 15875591b213SSam Leffler ath_hal_updatetxtriglevel(ah, AH_TRUE); 15885591b213SSam Leffler } 15898f939e79SAdrian Chadd if (status & HAL_INT_RX) { 15908f939e79SAdrian Chadd sc->sc_stats.ast_rx_intr++; 15910bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 15928f939e79SAdrian Chadd } 15938f939e79SAdrian Chadd if (status & HAL_INT_TX) { 15948f939e79SAdrian Chadd sc->sc_stats.ast_tx_intr++; 15958f939e79SAdrian Chadd /* 15968f939e79SAdrian Chadd * Grab all the currently set bits in the HAL txq bitmap 15978f939e79SAdrian Chadd * and blank them. This is the only place we should be 15988f939e79SAdrian Chadd * doing this. 15998f939e79SAdrian Chadd */ 1600ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 16018f939e79SAdrian Chadd txqs = 0xffffffff; 16028f939e79SAdrian Chadd ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 16038f939e79SAdrian Chadd sc->sc_txq_active |= txqs; 16040bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1605ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16068f939e79SAdrian Chadd } 16075591b213SSam Leffler if (status & HAL_INT_BMISS) { 16085591b213SSam Leffler sc->sc_stats.ast_bmiss++; 16090bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 16105591b213SSam Leffler } 16116ad02dbaSAdrian Chadd if (status & HAL_INT_GTT) 16126ad02dbaSAdrian Chadd sc->sc_stats.ast_tx_timeout++; 16135594f5c0SAdrian Chadd if (status & HAL_INT_CST) 16145594f5c0SAdrian Chadd sc->sc_stats.ast_tx_cst++; 1615c42a7b7eSSam Leffler if (status & HAL_INT_MIB) { 1616c42a7b7eSSam Leffler sc->sc_stats.ast_mib++; 1617ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1618c42a7b7eSSam Leffler /* 1619c42a7b7eSSam Leffler * Disable interrupts until we service the MIB 1620c42a7b7eSSam Leffler * interrupt; otherwise it will continue to fire. 1621c42a7b7eSSam Leffler */ 1622c42a7b7eSSam Leffler ath_hal_intrset(ah, 0); 1623c42a7b7eSSam Leffler /* 1624c42a7b7eSSam Leffler * Let the hal handle the event. We assume it will 1625c42a7b7eSSam Leffler * clear whatever condition caused the interrupt. 
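 * (ath_hal_mibevent() consumes the MIB/phy-error counters --
 * which is what feeds ANI -- and afterwards the mask is restored
 * from sc_imask, unless a PCU kick is pending, in which case
 * re-enabling is left to the rx path.)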
1626c42a7b7eSSam Leffler */ 1627ffa2cab6SSam Leffler ath_hal_mibevent(ah, &sc->sc_halstats); 16288f939e79SAdrian Chadd /* 16298f939e79SAdrian Chadd * Don't reset the interrupt if we've just 16308f939e79SAdrian Chadd * kicked the PCU, or we may get a nested 16318f939e79SAdrian Chadd * RXEOL before the rxproc has had a chance 16328f939e79SAdrian Chadd * to run. 16338f939e79SAdrian Chadd */ 16348f939e79SAdrian Chadd if (sc->sc_kickpcu == 0) 1635c42a7b7eSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1636ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1637c42a7b7eSSam Leffler } 16389c4fc1e8SSam Leffler if (status & HAL_INT_RXORN) { 16399c4fc1e8SSam Leffler /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1640f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 16419c4fc1e8SSam Leffler sc->sc_stats.ast_rxorn++; 16429c4fc1e8SSam Leffler } 16435591b213SSam Leffler } 1644ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1645ef27340cSAdrian Chadd sc->sc_intr_cnt--; 1646ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16475591b213SSam Leffler } 16485591b213SSam Leffler 16495591b213SSam Leffler static void 16505591b213SSam Leffler ath_fatal_proc(void *arg, int pending) 16515591b213SSam Leffler { 16525591b213SSam Leffler struct ath_softc *sc = arg; 1653fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 165416c8acaaSSam Leffler u_int32_t *state; 165516c8acaaSSam Leffler u_int32_t len; 165668e8e04eSSam Leffler void *sp; 16575591b213SSam Leffler 1658c42a7b7eSSam Leffler if_printf(ifp, "hardware error; resetting\n"); 165916c8acaaSSam Leffler /* 166016c8acaaSSam Leffler * Fatal errors are unrecoverable. Typically these 166116c8acaaSSam Leffler * are caused by DMA errors. Collect h/w state from 166216c8acaaSSam Leffler * the hal so we can diagnose what's going on. 166316c8acaaSSam Leffler */ 166468e8e04eSSam Leffler if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 166516c8acaaSSam Leffler KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 166668e8e04eSSam Leffler state = sp; 166716c8acaaSSam Leffler if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 166816c8acaaSSam Leffler state[0], state[1] , state[2], state[3], 166916c8acaaSSam Leffler state[4], state[5]); 167016c8acaaSSam Leffler } 1671517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 16725591b213SSam Leffler } 16735591b213SSam Leffler 16745591b213SSam Leffler static void 1675b032f27cSSam Leffler ath_bmiss_vap(struct ieee80211vap *vap) 16765591b213SSam Leffler { 167759fbb257SSam Leffler /* 167859fbb257SSam Leffler * Workaround phantom bmiss interrupts by sanity-checking 167959fbb257SSam Leffler * the time of our last rx'd frame. If it is within the 168059fbb257SSam Leffler * beacon miss interval then ignore the interrupt. If it's 168159fbb257SSam Leffler * truly a bmiss we'll get another interrupt soon and that'll 168259fbb257SSam Leffler * be dispatched up for processing. Note this applies only 168359fbb257SSam Leffler * for h/w beacon miss events. 
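 * The timeout below is iv_bmissthreshold beacon intervals,
 * converted from TU to microseconds (1 TU == 1024us).  Worked
 * example, assuming a threshold of 10 and a 100 TU beacon
 * interval: 10 * 100 * 1024 = 1,024,000us, i.e. the bmiss is
 * ignored if we heard a frame within the last ~1.02 seconds.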
168459fbb257SSam Leffler */ 168559fbb257SSam Leffler if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1686a7ace843SSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 1687a7ace843SSam Leffler struct ath_softc *sc = ifp->if_softc; 1688d7736e13SSam Leffler u_int64_t lastrx = sc->sc_lastrx; 1689d7736e13SSam Leffler u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 169080767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 1691d7736e13SSam Leffler u_int bmisstimeout = 1692b032f27cSSam Leffler vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1693d7736e13SSam Leffler 1694d7736e13SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 1695d7736e13SSam Leffler "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1696d7736e13SSam Leffler __func__, (unsigned long long) tsf, 1697d7736e13SSam Leffler (unsigned long long)(tsf - lastrx), 1698d7736e13SSam Leffler (unsigned long long) lastrx, bmisstimeout); 169959fbb257SSam Leffler 170059fbb257SSam Leffler if (tsf - lastrx <= bmisstimeout) { 1701d7736e13SSam Leffler sc->sc_stats.ast_bmiss_phantom++; 170259fbb257SSam Leffler return; 170359fbb257SSam Leffler } 170459fbb257SSam Leffler } 170559fbb257SSam Leffler ATH_VAP(vap)->av_bmiss(vap); 1706e585d188SSam Leffler } 1707b032f27cSSam Leffler 1708459bc4f0SSam Leffler static int 1709459bc4f0SSam Leffler ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1710459bc4f0SSam Leffler { 1711459bc4f0SSam Leffler uint32_t rsize; 1712459bc4f0SSam Leffler void *sp; 1713459bc4f0SSam Leffler 171425c96056SAdrian Chadd if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1715459bc4f0SSam Leffler return 0; 1716459bc4f0SSam Leffler KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1717459bc4f0SSam Leffler *hangs = *(uint32_t *)sp; 1718459bc4f0SSam Leffler return 1; 1719459bc4f0SSam Leffler } 1720459bc4f0SSam Leffler 1721b032f27cSSam Leffler static void 1722b032f27cSSam Leffler ath_bmiss_proc(void *arg, int pending) 1723b032f27cSSam Leffler { 1724b032f27cSSam Leffler struct ath_softc *sc = arg; 1725b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1726459bc4f0SSam Leffler uint32_t hangs; 1727b032f27cSSam Leffler 1728b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1729459bc4f0SSam Leffler 1730459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 17314fa8d4efSDaniel Eischen if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1732517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 1733459bc4f0SSam Leffler } else 1734b032f27cSSam Leffler ieee80211_beacon_miss(ifp->if_l2com); 17355591b213SSam Leffler } 17365591b213SSam Leffler 1737724c193aSSam Leffler /* 1738b032f27cSSam Leffler * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1739b032f27cSSam Leffler * calcs together with WME. If necessary disable the crypto 1740b032f27cSSam Leffler * hardware and mark the 802.11 state so keys will be setup 1741b032f27cSSam Leffler * with the MIC work done in software. 
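 * In short, for parts without combined WME+TKIP MIC support
 * (sc_wmetkipmic == 0): with WME enabled the hardware MIC is
 * turned off and the TKIPMIC crypto capability is cleared, so
 * net80211 does the Michael MIC in software; with WME disabled
 * the hardware MIC is re-enabled and the capability restored.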
1742b032f27cSSam Leffler */ 1743b032f27cSSam Leffler static void 1744b032f27cSSam Leffler ath_settkipmic(struct ath_softc *sc) 1745b032f27cSSam Leffler { 1746b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1747b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1748b032f27cSSam Leffler 1749b032f27cSSam Leffler if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1750b032f27cSSam Leffler if (ic->ic_flags & IEEE80211_F_WME) { 1751b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1752b032f27cSSam Leffler ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1753b032f27cSSam Leffler } else { 1754b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1755b032f27cSSam Leffler ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1756b032f27cSSam Leffler } 1757b032f27cSSam Leffler } 1758b032f27cSSam Leffler } 1759b032f27cSSam Leffler 17605591b213SSam Leffler static void 17615591b213SSam Leffler ath_init(void *arg) 17625591b213SSam Leffler { 17635591b213SSam Leffler struct ath_softc *sc = (struct ath_softc *) arg; 1764fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1765b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 17665591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 17675591b213SSam Leffler HAL_STATUS status; 17685591b213SSam Leffler 1769c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1770c42a7b7eSSam Leffler __func__, ifp->if_flags); 17715591b213SSam Leffler 1772f0b2a0beSSam Leffler ATH_LOCK(sc); 17735591b213SSam Leffler /* 17745591b213SSam Leffler * Stop anything previously setup. This is safe 17755591b213SSam Leffler * whether this is the first time through or not. 17765591b213SSam Leffler */ 1777c42a7b7eSSam Leffler ath_stop_locked(ifp); 17785591b213SSam Leffler 17795591b213SSam Leffler /* 17805591b213SSam Leffler * The basic interface to setting the hardware in a good 17815591b213SSam Leffler * state is ``reset''. On return the hardware is known to 17825591b213SSam Leffler * be powered up and with interrupts disabled. This must 17835591b213SSam Leffler * be followed by initialization of the appropriate bits 17845591b213SSam Leffler * and then setup of the interrupt mask. 17855591b213SSam Leffler */ 1786b032f27cSSam Leffler ath_settkipmic(sc); 178759efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 17885591b213SSam Leffler if_printf(ifp, "unable to reset hardware; hal status %u\n", 17895591b213SSam Leffler status); 1790b032f27cSSam Leffler ATH_UNLOCK(sc); 1791b032f27cSSam Leffler return; 17925591b213SSam Leffler } 1793b032f27cSSam Leffler ath_chan_change(sc, ic->ic_curchan); 17945591b213SSam Leffler 179548237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 179648237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 179748237774SAdrian Chadd 17985591b213SSam Leffler /* 1799c59005e9SSam Leffler * Likewise this is set during reset so update 1800c59005e9SSam Leffler * state cached in the driver. 
1801c59005e9SSam Leffler */ 1802c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 18032dc7fcc4SSam Leffler sc->sc_lastlongcal = 0; 18042dc7fcc4SSam Leffler sc->sc_resetcal = 1; 18052dc7fcc4SSam Leffler sc->sc_lastcalreset = 0; 1806a108ab63SAdrian Chadd sc->sc_lastani = 0; 1807a108ab63SAdrian Chadd sc->sc_lastshortcal = 0; 1808a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 18092fd9aabbSAdrian Chadd /* 18102fd9aabbSAdrian Chadd * Beacon timers were cleared here; give ath_newstate() 18112fd9aabbSAdrian Chadd * a hint that the beacon timers should be poked when 18122fd9aabbSAdrian Chadd * things transition to the RUN state. 18132fd9aabbSAdrian Chadd */ 18142fd9aabbSAdrian Chadd sc->sc_beacons = 0; 1815c42a7b7eSSam Leffler 1816c42a7b7eSSam Leffler /* 18178f939e79SAdrian Chadd * Initial aggregation settings. 18188f939e79SAdrian Chadd */ 18198f939e79SAdrian Chadd sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 18208f939e79SAdrian Chadd sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 18218f939e79SAdrian Chadd sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 18228f939e79SAdrian Chadd 18238f939e79SAdrian Chadd /* 18245591b213SSam Leffler * Setup the hardware after reset: the key cache 18255591b213SSam Leffler * is filled as needed and the receive engine is 18265591b213SSam Leffler * set going. Frame transmit is handled entirely 18275591b213SSam Leffler * in the frame output path; there's nothing to do 18285591b213SSam Leffler * here except setup the interrupt mask. 18295591b213SSam Leffler */ 18305591b213SSam Leffler if (ath_startrecv(sc) != 0) { 18315591b213SSam Leffler if_printf(ifp, "unable to start recv logic\n"); 1832b032f27cSSam Leffler ATH_UNLOCK(sc); 1833b032f27cSSam Leffler return; 18345591b213SSam Leffler } 18355591b213SSam Leffler 18365591b213SSam Leffler /* 18375591b213SSam Leffler * Enable interrupts. 18385591b213SSam Leffler */ 18395591b213SSam Leffler sc->sc_imask = HAL_INT_RX | HAL_INT_TX 18405591b213SSam Leffler | HAL_INT_RXEOL | HAL_INT_RXORN 18415591b213SSam Leffler | HAL_INT_FATAL | HAL_INT_GLOBAL; 1842c42a7b7eSSam Leffler /* 1843c42a7b7eSSam Leffler * Enable MIB interrupts when there are hardware phy counters. 1844c42a7b7eSSam Leffler * Note we only do this (at the moment) for station mode. 
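 * The MIB interrupt fires when one of the hardware phy-error/MIB
 * counters fills up; ath_intr() then masks interrupts, lets
 * ath_hal_mibevent() process the counters (this feeds ANI), and
 * restores the mask -- see the HAL_INT_MIB handling above.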
1845c42a7b7eSSam Leffler */ 1846c42a7b7eSSam Leffler if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1847c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_MIB; 18485591b213SSam Leffler 18495594f5c0SAdrian Chadd /* Enable global TX timeout and carrier sense timeout if available */ 18506ad02dbaSAdrian Chadd if (ath_hal_gtxto_supported(ah)) 18513788ebedSAdrian Chadd sc->sc_imask |= HAL_INT_GTT; 1852d0a0ebc6SAdrian Chadd 1853d0a0ebc6SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1854d0a0ebc6SAdrian Chadd __func__, sc->sc_imask); 18556ad02dbaSAdrian Chadd 185613f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 18572e986da5SSam Leffler callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1858b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 18595591b213SSam Leffler 1860b032f27cSSam Leffler ATH_UNLOCK(sc); 1861b032f27cSSam Leffler 186286e07743SSam Leffler #ifdef ATH_TX99_DIAG 186386e07743SSam Leffler if (sc->sc_tx99 != NULL) 186486e07743SSam Leffler sc->sc_tx99->start(sc->sc_tx99); 186586e07743SSam Leffler else 186686e07743SSam Leffler #endif 1867b032f27cSSam Leffler ieee80211_start_all(ic); /* start all vap's */ 18685591b213SSam Leffler } 18695591b213SSam Leffler 18705591b213SSam Leffler static void 1871c42a7b7eSSam Leffler ath_stop_locked(struct ifnet *ifp) 18725591b213SSam Leffler { 18735591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 18745591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 18755591b213SSam Leffler 1876c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1877c42a7b7eSSam Leffler __func__, sc->sc_invalid, ifp->if_flags); 18785591b213SSam Leffler 1879c42a7b7eSSam Leffler ATH_LOCK_ASSERT(sc); 188013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 18815591b213SSam Leffler /* 18825591b213SSam Leffler * Shutdown the hardware and driver: 1883c42a7b7eSSam Leffler * reset 802.11 state machine 18845591b213SSam Leffler * turn off timers 1885c42a7b7eSSam Leffler * disable interrupts 1886c42a7b7eSSam Leffler * turn off the radio 18875591b213SSam Leffler * clear transmit machinery 18885591b213SSam Leffler * clear receive machinery 18895591b213SSam Leffler * drain and release tx queues 18905591b213SSam Leffler * reclaim beacon resources 18915591b213SSam Leffler * power down hardware 18925591b213SSam Leffler * 18935591b213SSam Leffler * Note that some of this work is not possible if the 18945591b213SSam Leffler * hardware is gone (invalid). 
18955591b213SSam Leffler */ 189686e07743SSam Leffler #ifdef ATH_TX99_DIAG 189786e07743SSam Leffler if (sc->sc_tx99 != NULL) 189886e07743SSam Leffler sc->sc_tx99->stop(sc->sc_tx99); 189986e07743SSam Leffler #endif 19002e986da5SSam Leffler callout_stop(&sc->sc_wd_ch); 19012e986da5SSam Leffler sc->sc_wd_timer = 0; 190213f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1903c42a7b7eSSam Leffler if (!sc->sc_invalid) { 19043e50ec2cSSam Leffler if (sc->sc_softled) { 19053e50ec2cSSam Leffler callout_stop(&sc->sc_ledtimer); 19063e50ec2cSSam Leffler ath_hal_gpioset(ah, sc->sc_ledpin, 19073e50ec2cSSam Leffler !sc->sc_ledon); 19083e50ec2cSSam Leffler sc->sc_blinking = 0; 19093e50ec2cSSam Leffler } 19105591b213SSam Leffler ath_hal_intrset(ah, 0); 1911c42a7b7eSSam Leffler } 1912517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); 1913c42a7b7eSSam Leffler if (!sc->sc_invalid) { 19149a842e8bSAdrian Chadd ath_stoprecv(sc, 1); 1915c42a7b7eSSam Leffler ath_hal_phydisable(ah); 1916c42a7b7eSSam Leffler } else 19175591b213SSam Leffler sc->sc_rxlink = NULL; 1918b032f27cSSam Leffler ath_beacon_free(sc); /* XXX not needed */ 1919c42a7b7eSSam Leffler } 1920c42a7b7eSSam Leffler } 1921c42a7b7eSSam Leffler 1922ef27340cSAdrian Chadd #define MAX_TXRX_ITERATIONS 1000 1923ef27340cSAdrian Chadd static void 192421008bf1SAdrian Chadd ath_txrx_stop_locked(struct ath_softc *sc) 1925ef27340cSAdrian Chadd { 1926ef27340cSAdrian Chadd int i = MAX_TXRX_ITERATIONS; 1927ef27340cSAdrian Chadd 1928ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 192921008bf1SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 193021008bf1SAdrian Chadd 1931ef27340cSAdrian Chadd /* 1932ef27340cSAdrian Chadd * Sleep until all the pending operations have completed. 1933ef27340cSAdrian Chadd * 1934ef27340cSAdrian Chadd * The caller must ensure that reset has been incremented 1935ef27340cSAdrian Chadd * or the pending operations may continue being queued. 
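 * The wait below is bounded: at most MAX_TXRX_ITERATIONS one-tick
 * msleep()s, i.e. roughly one second at hz=1000 (about ten
 * seconds at hz=100), after which we print a diagnostic and carry
 * on rather than hang forever.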
1936ef27340cSAdrian Chadd */ 1937ef27340cSAdrian Chadd while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1938ef27340cSAdrian Chadd sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1939ef27340cSAdrian Chadd if (i <= 0) 1940ef27340cSAdrian Chadd break; 1941a2d8240dSAdrian Chadd msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1942ef27340cSAdrian Chadd i--; 1943ef27340cSAdrian Chadd } 1944ef27340cSAdrian Chadd 1945ef27340cSAdrian Chadd if (i <= 0) 1946ef27340cSAdrian Chadd device_printf(sc->sc_dev, 1947ef27340cSAdrian Chadd "%s: didn't finish after %d iterations\n", 1948ef27340cSAdrian Chadd __func__, MAX_TXRX_ITERATIONS); 1949ef27340cSAdrian Chadd } 1950ef27340cSAdrian Chadd #undef MAX_TXRX_ITERATIONS 1951ef27340cSAdrian Chadd 1952e78719adSAdrian Chadd #if 0 1953ef27340cSAdrian Chadd static void 195421008bf1SAdrian Chadd ath_txrx_stop(struct ath_softc *sc) 195521008bf1SAdrian Chadd { 195621008bf1SAdrian Chadd ATH_UNLOCK_ASSERT(sc); 195721008bf1SAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 195821008bf1SAdrian Chadd 195921008bf1SAdrian Chadd ATH_PCU_LOCK(sc); 196021008bf1SAdrian Chadd ath_txrx_stop_locked(sc); 196121008bf1SAdrian Chadd ATH_PCU_UNLOCK(sc); 196221008bf1SAdrian Chadd } 1963e78719adSAdrian Chadd #endif 196421008bf1SAdrian Chadd 196521008bf1SAdrian Chadd static void 1966ef27340cSAdrian Chadd ath_txrx_start(struct ath_softc *sc) 1967ef27340cSAdrian Chadd { 1968ef27340cSAdrian Chadd 1969ef27340cSAdrian Chadd taskqueue_unblock(sc->sc_tq); 1970ef27340cSAdrian Chadd } 1971ef27340cSAdrian Chadd 1972ee321975SAdrian Chadd /* 1973ee321975SAdrian Chadd * Grab the reset lock, and wait around until noone else 1974ee321975SAdrian Chadd * is trying to do anything with it. 1975ee321975SAdrian Chadd * 1976ee321975SAdrian Chadd * This is totally horrible but we can't hold this lock for 1977ee321975SAdrian Chadd * long enough to do TX/RX or we end up with net80211/ip stack 1978ee321975SAdrian Chadd * LORs and eventual deadlock. 1979ee321975SAdrian Chadd * 1980ee321975SAdrian Chadd * "dowait" signals whether to spin, waiting for the reset 1981ee321975SAdrian Chadd * lock count to reach 0. This should (for now) only be used 1982ee321975SAdrian Chadd * during the reset path, as the rest of the code may not 1983ee321975SAdrian Chadd * be locking-reentrant enough to behave correctly. 1984ee321975SAdrian Chadd * 1985ee321975SAdrian Chadd * Another, cleaner way should be found to serialise all of 1986ee321975SAdrian Chadd * these operations. 
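 * Sketch of the contract (as implemented below): the refcount
 * sc_inreset_cnt is always incremented, even on failure.  With
 * dowait=1 the routine drops ATH_PCU_LOCK and pause()s a tick at
 * a time, up to MAX_RESET_ITERATIONS, waiting for the count to
 * reach zero; it returns 1 if it got the "lock" exclusively and 0
 * if a reset is already in progress (recursion).  Either way the
 * caller is responsible for decrementing sc_inreset_cnt when done
 * -- see how ath_reset() does this before re-enabling interrupts.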
1987ee321975SAdrian Chadd */ 1988ee321975SAdrian Chadd #define MAX_RESET_ITERATIONS 10 1989ee321975SAdrian Chadd static int 1990ee321975SAdrian Chadd ath_reset_grablock(struct ath_softc *sc, int dowait) 1991ee321975SAdrian Chadd { 1992ee321975SAdrian Chadd int w = 0; 1993ee321975SAdrian Chadd int i = MAX_RESET_ITERATIONS; 1994ee321975SAdrian Chadd 1995ee321975SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 1996ee321975SAdrian Chadd do { 1997ee321975SAdrian Chadd if (sc->sc_inreset_cnt == 0) { 1998ee321975SAdrian Chadd w = 1; 1999ee321975SAdrian Chadd break; 2000ee321975SAdrian Chadd } 2001ee321975SAdrian Chadd if (dowait == 0) { 2002ee321975SAdrian Chadd w = 0; 2003ee321975SAdrian Chadd break; 2004ee321975SAdrian Chadd } 2005ee321975SAdrian Chadd ATH_PCU_UNLOCK(sc); 2006ee321975SAdrian Chadd pause("ath_reset_grablock", 1); 2007ee321975SAdrian Chadd i--; 2008ee321975SAdrian Chadd ATH_PCU_LOCK(sc); 2009ee321975SAdrian Chadd } while (i > 0); 2010ee321975SAdrian Chadd 2011ee321975SAdrian Chadd /* 2012ee321975SAdrian Chadd * We always increment the refcounter, regardless 2013ee321975SAdrian Chadd * of whether we succeeded to get it in an exclusive 2014ee321975SAdrian Chadd * way. 2015ee321975SAdrian Chadd */ 2016ee321975SAdrian Chadd sc->sc_inreset_cnt++; 2017ee321975SAdrian Chadd 2018ee321975SAdrian Chadd if (i <= 0) 2019ee321975SAdrian Chadd device_printf(sc->sc_dev, 2020ee321975SAdrian Chadd "%s: didn't finish after %d iterations\n", 2021ee321975SAdrian Chadd __func__, MAX_RESET_ITERATIONS); 2022ee321975SAdrian Chadd 2023ee321975SAdrian Chadd if (w == 0) 2024ee321975SAdrian Chadd device_printf(sc->sc_dev, 2025ee321975SAdrian Chadd "%s: warning, recursive reset path!\n", 2026ee321975SAdrian Chadd __func__); 2027ee321975SAdrian Chadd 2028ee321975SAdrian Chadd return w; 2029ee321975SAdrian Chadd } 2030ee321975SAdrian Chadd #undef MAX_RESET_ITERATIONS 2031ee321975SAdrian Chadd 2032ee321975SAdrian Chadd /* 2033ee321975SAdrian Chadd * XXX TODO: write ath_reset_releaselock 2034ee321975SAdrian Chadd */ 2035ee321975SAdrian Chadd 2036c42a7b7eSSam Leffler static void 2037c42a7b7eSSam Leffler ath_stop(struct ifnet *ifp) 2038c42a7b7eSSam Leffler { 2039c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2040c42a7b7eSSam Leffler 2041c42a7b7eSSam Leffler ATH_LOCK(sc); 2042c42a7b7eSSam Leffler ath_stop_locked(ifp); 2043f0b2a0beSSam Leffler ATH_UNLOCK(sc); 20445591b213SSam Leffler } 20455591b213SSam Leffler 20465591b213SSam Leffler /* 20475591b213SSam Leffler * Reset the hardware w/o losing operational state. This is 20485591b213SSam Leffler * basically a more efficient way of doing ath_stop, ath_init, 20495591b213SSam Leffler * followed by state transitions to the current 802.11 2050c42a7b7eSSam Leffler * operational state. Used to recover from various errors and 2051c42a7b7eSSam Leffler * to reset or reload hardware state. 
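 * The sequence below is roughly: block the taskqueue, disable
 * interrupts, wait for in-flight tx/rx/interrupt work
 * (ath_txrx_stop_locked), mark the reset in progress
 * (ath_reset_grablock), drain the tx queues, stop the rx engine
 * and reap what is already in the rx queue, do the actual
 * ath_hal_reset(), then bring rx/beacons back up and finally drop
 * the reset count and re-enable interrupts before restarting tx.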
20525591b213SSam Leffler */ 20536079fdbeSAdrian Chadd int 2054517526efSAdrian Chadd ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 20555591b213SSam Leffler { 2056c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2057b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 20585591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 20595591b213SSam Leffler HAL_STATUS status; 2060ef27340cSAdrian Chadd int i; 20615591b213SSam Leffler 2062f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 206316d4de92SAdrian Chadd 2064ee321975SAdrian Chadd /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2065ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 2066ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 2067ef27340cSAdrian Chadd 2068d52f7132SAdrian Chadd /* Try to (stop any further TX/RX from occuring */ 2069d52f7132SAdrian Chadd taskqueue_block(sc->sc_tq); 2070d52f7132SAdrian Chadd 2071ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2072e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* disable interrupts */ 2073e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */ 2074ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 2075ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2076ef27340cSAdrian Chadd __func__); 2077ef27340cSAdrian Chadd } 2078ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2079ef27340cSAdrian Chadd 2080f52d3452SAdrian Chadd /* 20819a842e8bSAdrian Chadd * Should now wait for pending TX/RX to complete 20829a842e8bSAdrian Chadd * and block future ones from occuring. This needs to be 20839a842e8bSAdrian Chadd * done before the TX queue is drained. 2084f52d3452SAdrian Chadd */ 2085ef27340cSAdrian Chadd ath_draintxq(sc, reset_type); /* stop xmit side */ 2086ef27340cSAdrian Chadd 2087ef27340cSAdrian Chadd /* 2088ef27340cSAdrian Chadd * Regardless of whether we're doing a no-loss flush or 2089ef27340cSAdrian Chadd * not, stop the PCU and handle what's in the RX queue. 2090ef27340cSAdrian Chadd * That way frames aren't dropped which shouldn't be. 2091ef27340cSAdrian Chadd */ 20929a842e8bSAdrian Chadd ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2093ef27340cSAdrian Chadd ath_rx_proc(sc, 0); 2094ef27340cSAdrian Chadd 2095b032f27cSSam Leffler ath_settkipmic(sc); /* configure TKIP MIC handling */ 20965591b213SSam Leffler /* NB: indicate channel change so we do a full reset */ 209759efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 20985591b213SSam Leffler if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 20995591b213SSam Leffler __func__, status); 2100c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 210148237774SAdrian Chadd 210248237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 210348237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 210448237774SAdrian Chadd 210568e8e04eSSam Leffler if (ath_startrecv(sc) != 0) /* restart recv */ 210668e8e04eSSam Leffler if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2107c42a7b7eSSam Leffler /* 2108c42a7b7eSSam Leffler * We may be doing a reset in response to an ioctl 2109c42a7b7eSSam Leffler * that changes the channel so update any state that 2110c42a7b7eSSam Leffler * might change as a result. 
2111c42a7b7eSSam Leffler */ 2112724c193aSSam Leffler ath_chan_change(sc, ic->ic_curchan); 2113c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 2114584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 211510ad9a77SSam Leffler if (sc->sc_tdma) 211610ad9a77SSam Leffler ath_tdma_config(sc, NULL); 211710ad9a77SSam Leffler else 211810ad9a77SSam Leffler #endif 2119c89b957aSSam Leffler ath_beacon_config(sc, NULL); 212010ad9a77SSam Leffler } 2121c42a7b7eSSam Leffler 2122ef27340cSAdrian Chadd /* 2123ef27340cSAdrian Chadd * Release the reset lock and re-enable interrupts here. 2124ef27340cSAdrian Chadd * If an interrupt was being processed in ath_intr(), 2125ef27340cSAdrian Chadd * it would disable interrupts at this point. So we have 2126ef27340cSAdrian Chadd * to atomically enable interrupts and decrement the 2127ef27340cSAdrian Chadd * reset counter - this way ath_intr() doesn't end up 2128ef27340cSAdrian Chadd * disabling interrupts without a corresponding enable 2129ef27340cSAdrian Chadd * in the rest or channel change path. 2130ef27340cSAdrian Chadd */ 2131ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2132ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 2133ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 2134ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 2135ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2136ef27340cSAdrian Chadd 2137ef27340cSAdrian Chadd /* 2138ef27340cSAdrian Chadd * TX and RX can be started here. If it were started with 2139ef27340cSAdrian Chadd * sc_inreset_cnt > 0, the TX and RX path would abort. 2140ef27340cSAdrian Chadd * Thus if this is a nested call through the reset or 2141ef27340cSAdrian Chadd * channel change code, TX completion will occur but 2142ef27340cSAdrian Chadd * RX completion and ath_start / ath_tx_start will not 2143ef27340cSAdrian Chadd * run. 2144ef27340cSAdrian Chadd */ 2145ef27340cSAdrian Chadd 2146ef27340cSAdrian Chadd /* Restart TX/RX as needed */ 2147ef27340cSAdrian Chadd ath_txrx_start(sc); 2148ef27340cSAdrian Chadd 2149ef27340cSAdrian Chadd /* XXX Restart TX completion and pending TX */ 2150ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) { 2151ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2152ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 2153ef27340cSAdrian Chadd ATH_TXQ_LOCK(&sc->sc_txq[i]); 2154ef27340cSAdrian Chadd ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2155ef27340cSAdrian Chadd ath_txq_sched(sc, &sc->sc_txq[i]); 2156ef27340cSAdrian Chadd ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2157ef27340cSAdrian Chadd } 2158ef27340cSAdrian Chadd } 2159ef27340cSAdrian Chadd } 2160ef27340cSAdrian Chadd 2161ef27340cSAdrian Chadd /* 2162ef27340cSAdrian Chadd * This may have been set during an ath_start() call which 2163ef27340cSAdrian Chadd * set this once it detected a concurrent TX was going on. 2164ef27340cSAdrian Chadd * So, clear it. 2165ef27340cSAdrian Chadd */ 2166e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 2167ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2168e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 2169ef27340cSAdrian Chadd 2170ef27340cSAdrian Chadd /* Handle any frames in the TX queue */ 2171ef27340cSAdrian Chadd /* 2172ef27340cSAdrian Chadd * XXX should this be done by the caller, rather than 2173ef27340cSAdrian Chadd * ath_reset() ? 
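 * (IFF_DRV_OACTIVE was cleared just above precisely so this call
 * can make progress; if nothing is queued on if_snd it is
 * effectively a no-op.)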
2174ef27340cSAdrian Chadd */ 2175c42a7b7eSSam Leffler ath_start(ifp); /* restart xmit */ 2176c42a7b7eSSam Leffler return 0; 21775591b213SSam Leffler } 21785591b213SSam Leffler 217968e8e04eSSam Leffler static int 2180b032f27cSSam Leffler ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2181b032f27cSSam Leffler { 21824b54a231SSam Leffler struct ieee80211com *ic = vap->iv_ic; 21834b54a231SSam Leffler struct ifnet *ifp = ic->ic_ifp; 21844b54a231SSam Leffler struct ath_softc *sc = ifp->if_softc; 21854b54a231SSam Leffler struct ath_hal *ah = sc->sc_ah; 21864b54a231SSam Leffler 21874b54a231SSam Leffler switch (cmd) { 21884b54a231SSam Leffler case IEEE80211_IOC_TXPOWER: 21894b54a231SSam Leffler /* 21904b54a231SSam Leffler * If per-packet TPC is enabled, then we have nothing 21914b54a231SSam Leffler * to do; otherwise we need to force the global limit. 21924b54a231SSam Leffler * All this can happen directly; no need to reset. 21934b54a231SSam Leffler */ 21944b54a231SSam Leffler if (!ath_hal_gettpc(ah)) 21954b54a231SSam Leffler ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 21964b54a231SSam Leffler return 0; 21974b54a231SSam Leffler } 2198517526efSAdrian Chadd /* XXX? Full or NOLOSS? */ 2199517526efSAdrian Chadd return ath_reset(ifp, ATH_RESET_FULL); 2200b032f27cSSam Leffler } 2201b032f27cSSam Leffler 2202b8e788a5SAdrian Chadd struct ath_buf * 220310ad9a77SSam Leffler _ath_getbuf_locked(struct ath_softc *sc) 220410ad9a77SSam Leffler { 220510ad9a77SSam Leffler struct ath_buf *bf; 220610ad9a77SSam Leffler 220710ad9a77SSam Leffler ATH_TXBUF_LOCK_ASSERT(sc); 220810ad9a77SSam Leffler 22096b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_txbuf); 2210e346b073SAdrian Chadd if (bf == NULL) { 2211e346b073SAdrian Chadd sc->sc_stats.ast_tx_getnobuf++; 2212e346b073SAdrian Chadd } else { 2213e346b073SAdrian Chadd if (bf->bf_flags & ATH_BUF_BUSY) { 2214e346b073SAdrian Chadd sc->sc_stats.ast_tx_getbusybuf++; 2215e346b073SAdrian Chadd bf = NULL; 2216e346b073SAdrian Chadd } 2217e346b073SAdrian Chadd } 2218e346b073SAdrian Chadd 221910ad9a77SSam Leffler if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) 22206b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 222110ad9a77SSam Leffler else 222210ad9a77SSam Leffler bf = NULL; 2223e346b073SAdrian Chadd 222410ad9a77SSam Leffler if (bf == NULL) { 222510ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 22266b349e5aSAdrian Chadd TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 222710ad9a77SSam Leffler "out of xmit buffers" : "xmit buffer busy"); 2228e346b073SAdrian Chadd return NULL; 222910ad9a77SSam Leffler } 2230e346b073SAdrian Chadd 2231e346b073SAdrian Chadd /* Valid bf here; clear some basic fields */ 2232e346b073SAdrian Chadd bf->bf_next = NULL; /* XXX just to be sure */ 2233e346b073SAdrian Chadd bf->bf_last = NULL; /* XXX again, just to be sure */ 2234e346b073SAdrian Chadd bf->bf_comp = NULL; /* XXX again, just to be sure */ 2235e346b073SAdrian Chadd bzero(&bf->bf_state, sizeof(bf->bf_state)); 2236e346b073SAdrian Chadd 223710ad9a77SSam Leffler return bf; 223810ad9a77SSam Leffler } 223910ad9a77SSam Leffler 2240e346b073SAdrian Chadd /* 2241e346b073SAdrian Chadd * When retrying a software frame, buffers marked ATH_BUF_BUSY 2242e346b073SAdrian Chadd * can't be thrown back on the queue as they could still be 2243e346b073SAdrian Chadd * in use by the hardware. 2244e346b073SAdrian Chadd * 2245e346b073SAdrian Chadd * This duplicates the buffer, or returns NULL. 
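 * (A NULL return simply means ath_getbuf() could not supply a
 * fresh ath_buf, e.g. under buffer exhaustion -- hence the XXX
 * notes below about keeping buffers free / not over-committing.)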
2246e346b073SAdrian Chadd * 2247e346b073SAdrian Chadd * The descriptor is also copied but the link pointers and 2248e346b073SAdrian Chadd * the DMA segments aren't copied; this frame should thus 2249e346b073SAdrian Chadd * be again passed through the descriptor setup/chain routines 2250e346b073SAdrian Chadd * so the link is correct. 2251e346b073SAdrian Chadd * 2252e346b073SAdrian Chadd * The caller must free the buffer using ath_freebuf(). 2253e346b073SAdrian Chadd * 2254e346b073SAdrian Chadd * XXX TODO: this call shouldn't fail as it'll cause packet loss 2255e346b073SAdrian Chadd * XXX in the TX pathway when retries are needed. 2256e346b073SAdrian Chadd * XXX Figure out how to keep some buffers free, or factor the 2257e346b073SAdrian Chadd * XXX number of busy buffers into the xmit path (ath_start()) 2258e346b073SAdrian Chadd * XXX so we don't over-commit. 2259e346b073SAdrian Chadd */ 2260e346b073SAdrian Chadd struct ath_buf * 2261e346b073SAdrian Chadd ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2262e346b073SAdrian Chadd { 2263e346b073SAdrian Chadd struct ath_buf *tbf; 2264e346b073SAdrian Chadd 2265e346b073SAdrian Chadd tbf = ath_getbuf(sc); 2266e346b073SAdrian Chadd if (tbf == NULL) 2267e346b073SAdrian Chadd return NULL; /* XXX failure? Why? */ 2268e346b073SAdrian Chadd 2269e346b073SAdrian Chadd /* Copy basics */ 2270e346b073SAdrian Chadd tbf->bf_next = NULL; 2271e346b073SAdrian Chadd tbf->bf_nseg = bf->bf_nseg; 2272e346b073SAdrian Chadd tbf->bf_txflags = bf->bf_txflags; 2273e346b073SAdrian Chadd tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2274e346b073SAdrian Chadd tbf->bf_status = bf->bf_status; 2275e346b073SAdrian Chadd tbf->bf_m = bf->bf_m; 2276e346b073SAdrian Chadd tbf->bf_node = bf->bf_node; 2277e346b073SAdrian Chadd /* will be setup by the chain/setup function */ 2278e346b073SAdrian Chadd tbf->bf_lastds = NULL; 2279e346b073SAdrian Chadd /* for now, last == self */ 2280e346b073SAdrian Chadd tbf->bf_last = tbf; 2281e346b073SAdrian Chadd tbf->bf_comp = bf->bf_comp; 2282e346b073SAdrian Chadd 2283e346b073SAdrian Chadd /* NOTE: DMA segments will be setup by the setup/chain functions */ 2284e346b073SAdrian Chadd 2285e346b073SAdrian Chadd /* The caller has to re-init the descriptor + links */ 2286e346b073SAdrian Chadd 2287e346b073SAdrian Chadd /* Copy state */ 2288e346b073SAdrian Chadd memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2289e346b073SAdrian Chadd 2290e346b073SAdrian Chadd return tbf; 2291e346b073SAdrian Chadd } 2292e346b073SAdrian Chadd 2293b8e788a5SAdrian Chadd struct ath_buf * 229410ad9a77SSam Leffler ath_getbuf(struct ath_softc *sc) 229510ad9a77SSam Leffler { 229610ad9a77SSam Leffler struct ath_buf *bf; 229710ad9a77SSam Leffler 229810ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 229910ad9a77SSam Leffler bf = _ath_getbuf_locked(sc); 2300e4e7938aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 230110ad9a77SSam Leffler if (bf == NULL) { 230210ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 230310ad9a77SSam Leffler 230410ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 230510ad9a77SSam Leffler sc->sc_stats.ast_tx_qstop++; 2306e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 230710ad9a77SSam Leffler ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2308e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 230910ad9a77SSam Leffler } 231010ad9a77SSam Leffler return bf; 231110ad9a77SSam Leffler } 231210ad9a77SSam Leffler 23135591b213SSam Leffler static void 23145591b213SSam Leffler ath_start(struct ifnet *ifp) 23155591b213SSam Leffler { 23165591b213SSam Leffler 
struct ath_softc *sc = ifp->if_softc; 23175591b213SSam Leffler struct ieee80211_node *ni; 23185591b213SSam Leffler struct ath_buf *bf; 231968e8e04eSSam Leffler struct mbuf *m, *next; 232068e8e04eSSam Leffler ath_bufhead frags; 23215591b213SSam Leffler 232213f4c340SRobert Watson if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 23235591b213SSam Leffler return; 2324ef27340cSAdrian Chadd 2325ef27340cSAdrian Chadd /* XXX is it ok to hold the ATH_LOCK here? */ 2326ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2327ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 2328ef27340cSAdrian Chadd device_printf(sc->sc_dev, 2329ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2330ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2331e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 2332e4e7938aSAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2333e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 2334ef27340cSAdrian Chadd return; 2335ef27340cSAdrian Chadd } 2336ef27340cSAdrian Chadd sc->sc_txstart_cnt++; 2337ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2338ef27340cSAdrian Chadd 23395591b213SSam Leffler for (;;) { 23405591b213SSam Leffler /* 23415591b213SSam Leffler * Grab a TX buffer and associated resources. 23425591b213SSam Leffler */ 234310ad9a77SSam Leffler bf = ath_getbuf(sc); 234410ad9a77SSam Leffler if (bf == NULL) 23455591b213SSam Leffler break; 23462b9411e2SSam Leffler 2347b032f27cSSam Leffler IFQ_DEQUEUE(&ifp->if_snd, m); 2348b032f27cSSam Leffler if (m == NULL) { 2349b032f27cSSam Leffler ATH_TXBUF_LOCK(sc); 23506b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2351b032f27cSSam Leffler ATH_TXBUF_UNLOCK(sc); 2352b032f27cSSam Leffler break; 2353b032f27cSSam Leffler } 2354b032f27cSSam Leffler ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 235568e8e04eSSam Leffler /* 235668e8e04eSSam Leffler * Check for fragmentation. If this frame 235768e8e04eSSam Leffler * has been broken up verify we have enough 235868e8e04eSSam Leffler * buffers to send all the fragments so all 235968e8e04eSSam Leffler * go out or none... 236068e8e04eSSam Leffler */ 23616b349e5aSAdrian Chadd TAILQ_INIT(&frags); 236268e8e04eSSam Leffler if ((m->m_flags & M_FRAG) && 236368e8e04eSSam Leffler !ath_txfrag_setup(sc, &frags, m, ni)) { 236468e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 236568e8e04eSSam Leffler "%s: out of txfrag buffers\n", __func__); 236636c6be9aSSam Leffler sc->sc_stats.ast_tx_nofrag++; 23679cb93076SSam Leffler ifp->if_oerrors++; 236868e8e04eSSam Leffler ath_freetx(m); 236968e8e04eSSam Leffler goto bad; 237068e8e04eSSam Leffler } 2371339ccfb3SSam Leffler ifp->if_opackets++; 237268e8e04eSSam Leffler nextfrag: 237368e8e04eSSam Leffler /* 237468e8e04eSSam Leffler * Pass the frame to the h/w for transmission. 237568e8e04eSSam Leffler * Fragmented frames have each frag chained together 237668e8e04eSSam Leffler * with m_nextpkt. We know there are sufficient ath_buf's 237768e8e04eSSam Leffler * to send all the frags because of work done by 237868e8e04eSSam Leffler * ath_txfrag_setup. We leave m_nextpkt set while 237968e8e04eSSam Leffler * calling ath_tx_start so it can use it to extend the 238068e8e04eSSam Leffler * the tx duration to cover the subsequent frag and 238168e8e04eSSam Leffler * so it can reclaim all the mbufs in case of an error; 238268e8e04eSSam Leffler * ath_tx_start clears m_nextpkt once it commits to 238368e8e04eSSam Leffler * handing the frame to the hardware. 
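/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * "all fragments or none" rule described above, reduced to a toy
 * packet chain.  toy_pkt, frag_count_sketch() and
 * can_send_all_frags_sketch() are hypothetical; the real check is
 * done by ath_txfrag_setup() before any fragment is committed.
 */
#include <stddef.h>

struct toy_pkt {
        struct toy_pkt *nextpkt;        /* models mbuf m_nextpkt chaining */
};

/* Count the fragments chained off a frame, including the head. */
static int
frag_count_sketch(const struct toy_pkt *p)
{
        int n = 0;

        for (; p != NULL; p = p->nextpkt)
                n++;
        return (n);
}

/* Commit to the chain only if every fragment can get a buffer. */
static int
can_send_all_frags_sketch(const struct toy_pkt *p, int free_bufs)
{
        return (frag_count_sketch(p) <= free_bufs);
}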
238468e8e04eSSam Leffler */ 238568e8e04eSSam Leffler next = m->m_nextpkt; 23865591b213SSam Leffler if (ath_tx_start(sc, ni, bf, m)) { 23875591b213SSam Leffler bad: 23885591b213SSam Leffler ifp->if_oerrors++; 2389c42a7b7eSSam Leffler reclaim: 239068e8e04eSSam Leffler bf->bf_m = NULL; 239168e8e04eSSam Leffler bf->bf_node = NULL; 2392c42a7b7eSSam Leffler ATH_TXBUF_LOCK(sc); 23936b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 239468e8e04eSSam Leffler ath_txfrag_cleanup(sc, &frags, ni); 2395c42a7b7eSSam Leffler ATH_TXBUF_UNLOCK(sc); 2396c42a7b7eSSam Leffler if (ni != NULL) 2397c42a7b7eSSam Leffler ieee80211_free_node(ni); 23985591b213SSam Leffler continue; 23995591b213SSam Leffler } 240068e8e04eSSam Leffler if (next != NULL) { 240168e8e04eSSam Leffler /* 240268e8e04eSSam Leffler * Beware of state changing between frags. 240368e8e04eSSam Leffler * XXX check sta power-save state? 240468e8e04eSSam Leffler */ 2405b032f27cSSam Leffler if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 240668e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 240768e8e04eSSam Leffler "%s: flush fragmented packet, state %s\n", 240868e8e04eSSam Leffler __func__, 2409b032f27cSSam Leffler ieee80211_state_name[ni->ni_vap->iv_state]); 241068e8e04eSSam Leffler ath_freetx(next); 241168e8e04eSSam Leffler goto reclaim; 241268e8e04eSSam Leffler } 241368e8e04eSSam Leffler m = next; 24146b349e5aSAdrian Chadd bf = TAILQ_FIRST(&frags); 241568e8e04eSSam Leffler KASSERT(bf != NULL, ("no buf for txfrag")); 24166b349e5aSAdrian Chadd TAILQ_REMOVE(&frags, bf, bf_list); 241768e8e04eSSam Leffler goto nextfrag; 241868e8e04eSSam Leffler } 24195591b213SSam Leffler 24202e986da5SSam Leffler sc->sc_wd_timer = 5; 24215591b213SSam Leffler } 2422ef27340cSAdrian Chadd 2423ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2424ef27340cSAdrian Chadd sc->sc_txstart_cnt--; 2425ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 24265591b213SSam Leffler } 24275591b213SSam Leffler 24285591b213SSam Leffler static int 24295591b213SSam Leffler ath_media_change(struct ifnet *ifp) 24305591b213SSam Leffler { 2431b032f27cSSam Leffler int error = ieee80211_media_change(ifp); 2432b032f27cSSam Leffler /* NB: only the fixed rate can change and that doesn't need a reset */ 2433b032f27cSSam Leffler return (error == ENETRESET ? 0 : error); 24345591b213SSam Leffler } 24355591b213SSam Leffler 2436c42a7b7eSSam Leffler /* 2437c42a7b7eSSam Leffler * Block/unblock tx+rx processing while a key change is done. 2438c42a7b7eSSam Leffler * We assume the caller serializes key management operations 2439c42a7b7eSSam Leffler * so we only need to worry about synchronization with other 2440c42a7b7eSSam Leffler * uses that originate in the driver. 
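/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * begin/program/end bracketing described above, reduced to function
 * pointers.  In the driver the bracketing is ath_key_update_begin()
 * and ath_key_update_end() below, invoked by net80211's key
 * management; keychange_bracket_sketch() itself is hypothetical.
 */
static void
keychange_bracket_sketch(void (*block_txrx)(void),
    void (*program_key)(void), void (*unblock_txrx)(void))
{
        block_txrx();           /* quiesce driver tx/rx processing */
        program_key();          /* key cache can be touched safely now */
        unblock_txrx();         /* resume deferred work */
}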
2441c42a7b7eSSam Leffler */ 2442c42a7b7eSSam Leffler static void 2443b032f27cSSam Leffler ath_key_update_begin(struct ieee80211vap *vap) 2444c42a7b7eSSam Leffler { 2445b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2446c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2447c42a7b7eSSam Leffler 2448c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2449b032f27cSSam Leffler taskqueue_block(sc->sc_tq); 2450c42a7b7eSSam Leffler IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2451c42a7b7eSSam Leffler } 2452c42a7b7eSSam Leffler 2453c42a7b7eSSam Leffler static void 2454b032f27cSSam Leffler ath_key_update_end(struct ieee80211vap *vap) 2455c42a7b7eSSam Leffler { 2456b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2457c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2458c42a7b7eSSam Leffler 2459c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2460c42a7b7eSSam Leffler IF_UNLOCK(&ifp->if_snd); 2461b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 2462c42a7b7eSSam Leffler } 24635591b213SSam Leffler 24644bc0e754SSam Leffler /* 24654bc0e754SSam Leffler * Calculate the receive filter according to the 24664bc0e754SSam Leffler * operating mode and state: 24674bc0e754SSam Leffler * 24684bc0e754SSam Leffler * o always accept unicast, broadcast, and multicast traffic 2469b032f27cSSam Leffler * o accept PHY error frames when hardware doesn't have MIB support 2470411373ebSSam Leffler * to count and we need them for ANI (sta mode only until recently) 2471b032f27cSSam Leffler * and we are not scanning (ANI is disabled) 2472411373ebSSam Leffler * NB: older hal's add rx filter bits out of sight and we need to 2473411373ebSSam Leffler * blindly preserve them 24744bc0e754SSam Leffler * o probe request frames are accepted only when operating in 247559aa14a9SRui Paulo * hostap, adhoc, mesh, or monitor modes 2476b032f27cSSam Leffler * o enable promiscuous mode 2477b032f27cSSam Leffler * - when in monitor mode 2478b032f27cSSam Leffler * - if interface marked PROMISC (assumes bridge setting is filtered) 24794bc0e754SSam Leffler * o accept beacons: 24804bc0e754SSam Leffler * - when operating in station mode for collecting rssi data when 24814bc0e754SSam Leffler * the station is otherwise quiet, or 2482b032f27cSSam Leffler * - when operating in adhoc mode so the 802.11 layer creates 2483b032f27cSSam Leffler * node table entries for peers, 24844bc0e754SSam Leffler * - when scanning 2485b032f27cSSam Leffler * - when doing s/w beacon miss (e.g. 
for ap+sta) 2486b032f27cSSam Leffler * - when operating in ap mode in 11g to detect overlapping bss that 2487b032f27cSSam Leffler * require protection 248859aa14a9SRui Paulo * - when operating in mesh mode to detect neighbors 24896f48c956SSam Leffler * o accept control frames: 24906f48c956SSam Leffler * - when in monitor mode 2491b032f27cSSam Leffler * XXX HT protection for 11n 24924bc0e754SSam Leffler */ 24934bc0e754SSam Leffler static u_int32_t 249468e8e04eSSam Leffler ath_calcrxfilter(struct ath_softc *sc) 24954bc0e754SSam Leffler { 2496fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2497b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 24984bc0e754SSam Leffler u_int32_t rfilt; 24994bc0e754SSam Leffler 2500b032f27cSSam Leffler rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 2501411373ebSSam Leffler if (!sc->sc_needmib && !sc->sc_scanning) 2502411373ebSSam Leffler rfilt |= HAL_RX_FILTER_PHYERR; 25034bc0e754SSam Leffler if (ic->ic_opmode != IEEE80211_M_STA) 25044bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROBEREQ; 25055463c4a4SSam Leffler /* XXX ic->ic_monvaps != 0? */ 2506b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) 25074bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROM; 25084bc0e754SSam Leffler if (ic->ic_opmode == IEEE80211_M_STA || 250947db982fSSam Leffler ic->ic_opmode == IEEE80211_M_IBSS || 2510b032f27cSSam Leffler sc->sc_swbmiss || sc->sc_scanning) 2511b032f27cSSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2512b032f27cSSam Leffler /* 2513b032f27cSSam Leffler * NB: We don't recalculate the rx filter when 2514b032f27cSSam Leffler * ic_protmode changes; otherwise we could do 2515b032f27cSSam Leffler * this only when ic_protmode != NONE. 2516b032f27cSSam Leffler */ 2517b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_HOSTAP && 2518b032f27cSSam Leffler IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) 25194bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2520f378d4c8SAdrian Chadd 2521f378d4c8SAdrian Chadd /* 25224aa18e9dSAdrian Chadd * Enable hardware PS-POLL RX only for hostap mode; 2523f378d4c8SAdrian Chadd * STA mode sends PS-POLL frames but never 25244aa18e9dSAdrian Chadd * receives them. 2525f378d4c8SAdrian Chadd */ 2526dce0bccaSAdrian Chadd if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 2527f378d4c8SAdrian Chadd 0, NULL) == HAL_OK && 2528f378d4c8SAdrian Chadd ic->ic_opmode == IEEE80211_M_HOSTAP) 2529f378d4c8SAdrian Chadd rfilt |= HAL_RX_FILTER_PSPOLL; 2530f378d4c8SAdrian Chadd 2531fe0dd789SSam Leffler if (sc->sc_nmeshvaps) { 253259aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BEACON; 253359aa14a9SRui Paulo if (sc->sc_hasbmatch) 253459aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BSSID; 253559aa14a9SRui Paulo else 253659aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_PROM; 253759aa14a9SRui Paulo } 25386f48c956SSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR) 25396f48c956SSam Leffler rfilt |= HAL_RX_FILTER_CONTROL; 2540f378d4c8SAdrian Chadd 2541f378d4c8SAdrian Chadd /* 2542f378d4c8SAdrian Chadd * Enable RX of compressed BAR frames only when doing 2543f378d4c8SAdrian Chadd * 802.11n. Required for A-MPDU. 2544f378d4c8SAdrian Chadd */ 2545a83df4d3SAdrian Chadd if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) 2546a83df4d3SAdrian Chadd rfilt |= HAL_RX_FILTER_COMPBAR; 2547f378d4c8SAdrian Chadd 2548fad901ebSAdrian Chadd /* 2549fad901ebSAdrian Chadd * Enable radar PHY errors if requested by the 2550fad901ebSAdrian Chadd * DFS module. 
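/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): how a
 * receive filter word is accumulated from mode/state flags, as
 * ath_calcrxfilter() does above.  The SKETCH_RXF_* bit values are
 * invented for this example and are not the HAL_RX_FILTER_* encoding.
 */
#include <stdint.h>

#define SKETCH_RXF_UCAST        0x0001  /* hypothetical bit assignments */
#define SKETCH_RXF_MCAST        0x0002
#define SKETCH_RXF_BCAST        0x0004
#define SKETCH_RXF_PROM         0x0008
#define SKETCH_RXF_BEACON       0x0010

static uint32_t
calc_rxfilter_sketch(int monitor, int promisc, int scanning)
{
        uint32_t rfilt;

        /* unicast, multicast and broadcast are always accepted */
        rfilt = SKETCH_RXF_UCAST | SKETCH_RXF_MCAST | SKETCH_RXF_BCAST;
        if (monitor || promisc)
                rfilt |= SKETCH_RXF_PROM;
        if (scanning)
                rfilt |= SKETCH_RXF_BEACON;     /* beacons drive scan results */
        return (rfilt);
}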
2551fad901ebSAdrian Chadd */ 2552fad901ebSAdrian Chadd if (sc->sc_dodfs) 2553fad901ebSAdrian Chadd rfilt |= HAL_RX_FILTER_PHYRADAR; 2554fad901ebSAdrian Chadd 2555b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", 2556b032f27cSSam Leffler __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); 25574bc0e754SSam Leffler return rfilt; 2558b032f27cSSam Leffler } 2559b032f27cSSam Leffler 2560b032f27cSSam Leffler static void 2561b032f27cSSam Leffler ath_update_promisc(struct ifnet *ifp) 2562b032f27cSSam Leffler { 2563b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2564b032f27cSSam Leffler u_int32_t rfilt; 2565b032f27cSSam Leffler 2566b032f27cSSam Leffler /* configure rx filter */ 2567b032f27cSSam Leffler rfilt = ath_calcrxfilter(sc); 2568b032f27cSSam Leffler ath_hal_setrxfilter(sc->sc_ah, rfilt); 2569b032f27cSSam Leffler 2570b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2571b032f27cSSam Leffler } 2572b032f27cSSam Leffler 2573b032f27cSSam Leffler static void 2574b032f27cSSam Leffler ath_update_mcast(struct ifnet *ifp) 2575b032f27cSSam Leffler { 2576b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2577b032f27cSSam Leffler u_int32_t mfilt[2]; 2578b032f27cSSam Leffler 2579b032f27cSSam Leffler /* calculate and install multicast filter */ 2580b032f27cSSam Leffler if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2581b032f27cSSam Leffler struct ifmultiaddr *ifma; 2582b032f27cSSam Leffler /* 2583b032f27cSSam Leffler * Merge multicast addresses to form the hardware filter. 2584b032f27cSSam Leffler */ 2585b032f27cSSam Leffler mfilt[0] = mfilt[1] = 0; 2586eb956cd0SRobert Watson if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2587b032f27cSSam Leffler TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2588b032f27cSSam Leffler caddr_t dl; 2589b032f27cSSam Leffler u_int32_t val; 2590b032f27cSSam Leffler u_int8_t pos; 2591b032f27cSSam Leffler 2592b032f27cSSam Leffler /* calculate XOR of eight 6bit values */ 2593b032f27cSSam Leffler dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2594b032f27cSSam Leffler val = LE_READ_4(dl + 0); 2595b032f27cSSam Leffler pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2596b032f27cSSam Leffler val = LE_READ_4(dl + 3); 2597b032f27cSSam Leffler pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2598b032f27cSSam Leffler pos &= 0x3f; 2599b032f27cSSam Leffler mfilt[pos / 32] |= (1 << (pos % 32)); 2600b032f27cSSam Leffler } 2601eb956cd0SRobert Watson if_maddr_runlock(ifp); 2602b032f27cSSam Leffler } else 2603b032f27cSSam Leffler mfilt[0] = mfilt[1] = ~0; 2604b032f27cSSam Leffler ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2605b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2606b032f27cSSam Leffler __func__, mfilt[0], mfilt[1]); 26074bc0e754SSam Leffler } 26084bc0e754SSam Leffler 26095591b213SSam Leffler static void 26105591b213SSam Leffler ath_mode_init(struct ath_softc *sc) 26115591b213SSam Leffler { 2612fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2613b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 2614b032f27cSSam Leffler u_int32_t rfilt; 26155591b213SSam Leffler 26164bc0e754SSam Leffler /* configure rx filter */ 261768e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 26184bc0e754SSam Leffler ath_hal_setrxfilter(ah, rfilt); 26194bc0e754SSam Leffler 26205591b213SSam Leffler /* configure operational mode */ 2621c42a7b7eSSam Leffler ath_hal_setopmode(ah); 2622c42a7b7eSSam Leffler 262329aca940SSam 
Leffler /* handle any link-level address change */ 262429aca940SSam Leffler ath_hal_setmac(ah, IF_LLADDR(ifp)); 26255591b213SSam Leffler 26265591b213SSam Leffler /* calculate and install multicast filter */ 2627b032f27cSSam Leffler ath_update_mcast(ifp); 26285591b213SSam Leffler } 26295591b213SSam Leffler 2630c42a7b7eSSam Leffler /* 2631c42a7b7eSSam Leffler * Set the slot time based on the current setting. 2632c42a7b7eSSam Leffler */ 2633c42a7b7eSSam Leffler static void 2634c42a7b7eSSam Leffler ath_setslottime(struct ath_softc *sc) 2635c42a7b7eSSam Leffler { 2636b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2637c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2638aaa70f2fSSam Leffler u_int usec; 2639c42a7b7eSSam Leffler 2640aaa70f2fSSam Leffler if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2641aaa70f2fSSam Leffler usec = 13; 2642aaa70f2fSSam Leffler else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2643aaa70f2fSSam Leffler usec = 21; 2644724c193aSSam Leffler else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2645724c193aSSam Leffler /* honor short/long slot time only in 11g */ 2646724c193aSSam Leffler /* XXX shouldn't honor on pure g or turbo g channel */ 2647724c193aSSam Leffler if (ic->ic_flags & IEEE80211_F_SHSLOT) 2648aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_9; 2649aaa70f2fSSam Leffler else 2650aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_20; 2651724c193aSSam Leffler } else 2652724c193aSSam Leffler usec = HAL_SLOT_TIME_9; 2653aaa70f2fSSam Leffler 2654aaa70f2fSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 2655aaa70f2fSSam Leffler "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2656aaa70f2fSSam Leffler __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2657aaa70f2fSSam Leffler ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2658aaa70f2fSSam Leffler 2659aaa70f2fSSam Leffler ath_hal_setslottime(ah, usec); 2660c42a7b7eSSam Leffler sc->sc_updateslot = OK; 2661c42a7b7eSSam Leffler } 2662c42a7b7eSSam Leffler 2663c42a7b7eSSam Leffler /* 2664c42a7b7eSSam Leffler * Callback from the 802.11 layer to update the 2665c42a7b7eSSam Leffler * slot time based on the current setting. 2666c42a7b7eSSam Leffler */ 2667c42a7b7eSSam Leffler static void 2668c42a7b7eSSam Leffler ath_updateslot(struct ifnet *ifp) 2669c42a7b7eSSam Leffler { 2670c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2671b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 2672c42a7b7eSSam Leffler 2673c42a7b7eSSam Leffler /* 2674c42a7b7eSSam Leffler * When not coordinating the BSS, change the hardware 2675c42a7b7eSSam Leffler * immediately. For other operation we defer the change 2676c42a7b7eSSam Leffler * until beacon updates have propagated to the stations. 2677c42a7b7eSSam Leffler */ 267859aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 267959aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) 2680c42a7b7eSSam Leffler sc->sc_updateslot = UPDATE; 2681c42a7b7eSSam Leffler else 2682c42a7b7eSSam Leffler ath_setslottime(sc); 2683c42a7b7eSSam Leffler } 2684c42a7b7eSSam Leffler 2685c42a7b7eSSam Leffler /* 268680d2765fSSam Leffler * Setup a h/w transmit queue for beacons. 
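/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * slot-time selection performed by ath_setslottime() above, written
 * as a standalone table in microseconds.  slot_usec_sketch() is a
 * hypothetical helper; the driver itself passes the HAL_SLOT_TIME_*
 * constants to ath_hal_setslottime().
 */
static unsigned int
slot_usec_sketch(int is_half, int is_quarter, int is_11g, int short_slot)
{
        if (is_half)
                return (13);            /* half-rate (10MHz) channels */
        if (is_quarter)
                return (21);            /* quarter-rate (5MHz) channels */
        if (is_11g)
                return (short_slot ? 9 : 20);   /* honor short/long only in 11g */
        return (9);                     /* everything else in this logic */
}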
268780d2765fSSam Leffler */ 268880d2765fSSam Leffler static int 268980d2765fSSam Leffler ath_beaconq_setup(struct ath_hal *ah) 269080d2765fSSam Leffler { 269180d2765fSSam Leffler HAL_TXQ_INFO qi; 269280d2765fSSam Leffler 269380d2765fSSam Leffler memset(&qi, 0, sizeof(qi)); 269480d2765fSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 269580d2765fSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 269680d2765fSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 26970f2e86fbSSam Leffler /* NB: for dynamic turbo, don't enable any other interrupts */ 2698bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 269980d2765fSSam Leffler return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 270080d2765fSSam Leffler } 270180d2765fSSam Leffler 270280d2765fSSam Leffler /* 27030f2e86fbSSam Leffler * Setup the transmit queue parameters for the beacon queue. 27040f2e86fbSSam Leffler */ 27050f2e86fbSSam Leffler static int 27060f2e86fbSSam Leffler ath_beaconq_config(struct ath_softc *sc) 27070f2e86fbSSam Leffler { 27080f2e86fbSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) 2709b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 27100f2e86fbSSam Leffler struct ath_hal *ah = sc->sc_ah; 27110f2e86fbSSam Leffler HAL_TXQ_INFO qi; 27120f2e86fbSSam Leffler 27130f2e86fbSSam Leffler ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); 271459aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 271559aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 27160f2e86fbSSam Leffler /* 27170f2e86fbSSam Leffler * Always burst out beacon and CAB traffic. 27180f2e86fbSSam Leffler */ 27190f2e86fbSSam Leffler qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; 27200f2e86fbSSam Leffler qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; 27210f2e86fbSSam Leffler qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; 27220f2e86fbSSam Leffler } else { 27230f2e86fbSSam Leffler struct wmeParams *wmep = 27240f2e86fbSSam Leffler &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; 27250f2e86fbSSam Leffler /* 27260f2e86fbSSam Leffler * Adhoc mode; important thing is to use 2x cwmin. 27270f2e86fbSSam Leffler */ 27280f2e86fbSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 27290f2e86fbSSam Leffler qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 27300f2e86fbSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 27310f2e86fbSSam Leffler } 27320f2e86fbSSam Leffler 27330f2e86fbSSam Leffler if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 27340f2e86fbSSam Leffler device_printf(sc->sc_dev, "unable to update parameters for " 27350f2e86fbSSam Leffler "beacon hardware queue!\n"); 27360f2e86fbSSam Leffler return 0; 27370f2e86fbSSam Leffler } else { 27380f2e86fbSSam Leffler ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 27390f2e86fbSSam Leffler return 1; 27400f2e86fbSSam Leffler } 27410f2e86fbSSam Leffler #undef ATH_EXPONENT_TO_VALUE 27420f2e86fbSSam Leffler } 27430f2e86fbSSam Leffler 27440f2e86fbSSam Leffler /* 2745c42a7b7eSSam Leffler * Allocate and setup an initial beacon frame. 
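/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * WME "log" contention-window encoding expanded by the
 * ATH_EXPONENT_TO_VALUE() macro in ath_beaconq_config() above.  A
 * logcw value v encodes a window of 2^v - 1, e.g. logcwmin = 4 gives
 * cwmin = 15, which the adhoc branch above doubles to 30.
 * exp_to_cw_sketch() is a hypothetical helper.
 */
static int
exp_to_cw_sketch(int logcw)
{
        return ((1 << logcw) - 1);      /* same expansion as the macro */
}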
2746c42a7b7eSSam Leffler */ 27475591b213SSam Leffler static int 27485591b213SSam Leffler ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 27495591b213SSam Leffler { 2750b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 2751b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 27525591b213SSam Leffler struct ath_buf *bf; 27535591b213SSam Leffler struct mbuf *m; 2754c42a7b7eSSam Leffler int error; 27555591b213SSam Leffler 2756b032f27cSSam Leffler bf = avp->av_bcbuf; 27577ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n", 27587ebd03d7SAdrian Chadd __func__, bf->bf_m, bf->bf_node); 2759b032f27cSSam Leffler if (bf->bf_m != NULL) { 2760b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2761b032f27cSSam Leffler m_freem(bf->bf_m); 2762b032f27cSSam Leffler bf->bf_m = NULL; 2763c42a7b7eSSam Leffler } 2764b032f27cSSam Leffler if (bf->bf_node != NULL) { 2765b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 2766b032f27cSSam Leffler bf->bf_node = NULL; 2767b032f27cSSam Leffler } 2768b032f27cSSam Leffler 27695591b213SSam Leffler /* 27705591b213SSam Leffler * NB: the beacon data buffer must be 32-bit aligned; 27715591b213SSam Leffler * we assume the mbuf routines will return us something 27725591b213SSam Leffler * with this alignment (perhaps should assert). 27735591b213SSam Leffler */ 2774b032f27cSSam Leffler m = ieee80211_beacon_alloc(ni, &avp->av_boff); 27755591b213SSam Leffler if (m == NULL) { 2776b032f27cSSam Leffler device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__); 27775591b213SSam Leffler sc->sc_stats.ast_be_nombuf++; 27785591b213SSam Leffler return ENOMEM; 27795591b213SSam Leffler } 2780f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 2781f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 27825591b213SSam Leffler BUS_DMA_NOWAIT); 2783b032f27cSSam Leffler if (error != 0) { 2784b032f27cSSam Leffler device_printf(sc->sc_dev, 2785b032f27cSSam Leffler "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n", 2786b032f27cSSam Leffler __func__, error); 2787b032f27cSSam Leffler m_freem(m); 2788b032f27cSSam Leffler return error; 2789b032f27cSSam Leffler } 2790b032f27cSSam Leffler 2791b032f27cSSam Leffler /* 2792b032f27cSSam Leffler * Calculate a TSF adjustment factor required for staggered 2793b032f27cSSam Leffler * beacons. Note that we assume the format of the beacon 2794b032f27cSSam Leffler * frame leaves the tstamp field immediately following the 2795b032f27cSSam Leffler * header. 2796b032f27cSSam Leffler */ 2797b032f27cSSam Leffler if (sc->sc_stagbeacons && avp->av_bslot > 0) { 2798b032f27cSSam Leffler uint64_t tsfadjust; 2799b032f27cSSam Leffler struct ieee80211_frame *wh; 2800b032f27cSSam Leffler 2801b032f27cSSam Leffler /* 2802b032f27cSSam Leffler * The beacon interval is in TU's; the TSF is in usecs. 2803b032f27cSSam Leffler * We figure out how many TU's to add to align the timestamp 2804b032f27cSSam Leffler * then convert to TSF units and handle byte swapping before 2805b032f27cSSam Leffler * inserting it in the frame. The hardware will then add this 2806b032f27cSSam Leffler * each time a beacon frame is sent. Note that we align vap's 2807b032f27cSSam Leffler * 1..N and leave vap 0 untouched. This means vap 0 has a 2808b032f27cSSam Leffler * timestamp in one beacon interval while the others get a 2809b032f27cSSam Leffler * timstamp aligned to the next interval. 
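/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * per-slot timestamp adjustment for staggered beacons described
 * above.  With a beacon interval of intval TU spread over nbcbuf
 * slots, slot 0 is left untouched and slot s (s > 0) has
 * intval * (nbcbuf - s) / nbcbuf TU added to its timestamp, converted
 * to TSF units (1 TU = 1024 usec, hence the shift by 10).  For
 * example intval = 100, nbcbuf = 4, slot = 1 gives 75 TU = 76800
 * usec.  tsfadjust_usec_sketch() is a hypothetical helper.
 */
#include <stdint.h>

static uint64_t
tsfadjust_usec_sketch(uint32_t intval_tu, int nbcbuf, int slot)
{
        uint64_t adj_tu;

        adj_tu = (uint64_t)intval_tu * (uint64_t)(nbcbuf - slot) / nbcbuf;
        return (adj_tu << 10);          /* TU -> TSF microseconds */
}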
2810b032f27cSSam Leffler */ 2811b032f27cSSam Leffler tsfadjust = ni->ni_intval * 2812b032f27cSSam Leffler (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF; 2813b032f27cSSam Leffler tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */ 2814b032f27cSSam Leffler 2815b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2816b032f27cSSam Leffler "%s: %s beacons bslot %d intval %u tsfadjust %llu\n", 2817b032f27cSSam Leffler __func__, sc->sc_stagbeacons ? "stagger" : "burst", 28183627e321SSam Leffler avp->av_bslot, ni->ni_intval, 28193627e321SSam Leffler (long long unsigned) le64toh(tsfadjust)); 2820b032f27cSSam Leffler 2821b032f27cSSam Leffler wh = mtod(m, struct ieee80211_frame *); 2822b032f27cSSam Leffler memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); 2823b032f27cSSam Leffler } 2824c42a7b7eSSam Leffler bf->bf_m = m; 2825f818612bSSam Leffler bf->bf_node = ieee80211_ref_node(ni); 2826b032f27cSSam Leffler 2827b032f27cSSam Leffler return 0; 28285591b213SSam Leffler } 2829c42a7b7eSSam Leffler 2830c42a7b7eSSam Leffler /* 2831c42a7b7eSSam Leffler * Setup the beacon frame for transmit. 2832c42a7b7eSSam Leffler */ 2833c42a7b7eSSam Leffler static void 2834c42a7b7eSSam Leffler ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 2835c42a7b7eSSam Leffler { 2836c42a7b7eSSam Leffler #define USE_SHPREAMBLE(_ic) \ 2837c42a7b7eSSam Leffler (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 2838c42a7b7eSSam Leffler == IEEE80211_F_SHPREAMBLE) 2839c42a7b7eSSam Leffler struct ieee80211_node *ni = bf->bf_node; 2840c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 2841c42a7b7eSSam Leffler struct mbuf *m = bf->bf_m; 2842c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2843c42a7b7eSSam Leffler struct ath_desc *ds; 2844c42a7b7eSSam Leffler int flags, antenna; 284555f63772SSam Leffler const HAL_RATE_TABLE *rt; 284655f63772SSam Leffler u_int8_t rix, rate; 2847c42a7b7eSSam Leffler 28484a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", 2849c42a7b7eSSam Leffler __func__, m, m->m_len); 28505591b213SSam Leffler 28515591b213SSam Leffler /* setup descriptors */ 28525591b213SSam Leffler ds = bf->bf_desc; 28536edf1dc7SAdrian Chadd bf->bf_last = bf; 28546edf1dc7SAdrian Chadd bf->bf_lastds = ds; 28555591b213SSam Leffler 2856c42a7b7eSSam Leffler flags = HAL_TXDESC_NOACK; 2857c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 2858c42a7b7eSSam Leffler ds->ds_link = bf->bf_daddr; /* self-linked */ 2859c42a7b7eSSam Leffler flags |= HAL_TXDESC_VEOL; 2860c42a7b7eSSam Leffler /* 2861c42a7b7eSSam Leffler * Let hardware handle antenna switching. 2862c42a7b7eSSam Leffler */ 28634866e6c2SSam Leffler antenna = sc->sc_txantenna; 2864c42a7b7eSSam Leffler } else { 28655591b213SSam Leffler ds->ds_link = 0; 2866c42a7b7eSSam Leffler /* 2867c42a7b7eSSam Leffler * Switch antenna every 4 beacons. 2868c42a7b7eSSam Leffler * XXX assumes two antenna 2869c42a7b7eSSam Leffler */ 2870b032f27cSSam Leffler if (sc->sc_txantenna != 0) 2871b032f27cSSam Leffler antenna = sc->sc_txantenna; 2872b032f27cSSam Leffler else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0) 2873b032f27cSSam Leffler antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1); 2874b032f27cSSam Leffler else 2875b032f27cSSam Leffler antenna = (sc->sc_stats.ast_be_xmit & 4 ? 
2 : 1); 2876c42a7b7eSSam Leffler } 2877c42a7b7eSSam Leffler 2878c42a7b7eSSam Leffler KASSERT(bf->bf_nseg == 1, 2879c42a7b7eSSam Leffler ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 28805591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 28815591b213SSam Leffler /* 28825591b213SSam Leffler * Calculate rate code. 28835591b213SSam Leffler * XXX everything at min xmit rate 28845591b213SSam Leffler */ 2885b032f27cSSam Leffler rix = 0; 288655f63772SSam Leffler rt = sc->sc_currates; 288755f63772SSam Leffler rate = rt->info[rix].rateCode; 2888c42a7b7eSSam Leffler if (USE_SHPREAMBLE(ic)) 288955f63772SSam Leffler rate |= rt->info[rix].shortPreamble; 28905591b213SSam Leffler ath_hal_setuptxdesc(ah, ds 2891c42a7b7eSSam Leffler , m->m_len + IEEE80211_CRC_LEN /* frame length */ 28925591b213SSam Leffler , sizeof(struct ieee80211_frame)/* header length */ 28935591b213SSam Leffler , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 2894c42a7b7eSSam Leffler , ni->ni_txpower /* txpower XXX */ 28955591b213SSam Leffler , rate, 1 /* series 0 rate/tries */ 28965591b213SSam Leffler , HAL_TXKEYIX_INVALID /* no encryption */ 2897c42a7b7eSSam Leffler , antenna /* antenna mode */ 2898c42a7b7eSSam Leffler , flags /* no ack, veol for beacons */ 28995591b213SSam Leffler , 0 /* rts/cts rate */ 29005591b213SSam Leffler , 0 /* rts/cts duration */ 29015591b213SSam Leffler ); 29025591b213SSam Leffler /* NB: beacon's BufLen must be a multiple of 4 bytes */ 29035591b213SSam Leffler ath_hal_filltxdesc(ah, ds 2904c42a7b7eSSam Leffler , roundup(m->m_len, 4) /* buffer length */ 29055591b213SSam Leffler , AH_TRUE /* first segment */ 29065591b213SSam Leffler , AH_TRUE /* last segment */ 2907c42a7b7eSSam Leffler , ds /* first descriptor */ 29085591b213SSam Leffler ); 2909b032f27cSSam Leffler #if 0 2910b032f27cSSam Leffler ath_desc_swap(ds); 2911b032f27cSSam Leffler #endif 2912c42a7b7eSSam Leffler #undef USE_SHPREAMBLE 29135591b213SSam Leffler } 29145591b213SSam Leffler 2915b105a069SSam Leffler static void 2916b032f27cSSam Leffler ath_beacon_update(struct ieee80211vap *vap, int item) 2917b105a069SSam Leffler { 2918b032f27cSSam Leffler struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff; 2919b105a069SSam Leffler 2920b105a069SSam Leffler setbit(bo->bo_flags, item); 2921b105a069SSam Leffler } 2922b105a069SSam Leffler 2923c42a7b7eSSam Leffler /* 2924622b3fd2SSam Leffler * Append the contents of src to dst; both queues 2925622b3fd2SSam Leffler * are assumed to be locked. 2926622b3fd2SSam Leffler */ 2927622b3fd2SSam Leffler static void 2928622b3fd2SSam Leffler ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2929622b3fd2SSam Leffler { 2930e86fd7a7SAdrian Chadd 2931e86fd7a7SAdrian Chadd ATH_TXQ_LOCK_ASSERT(dst); 2932e86fd7a7SAdrian Chadd ATH_TXQ_LOCK_ASSERT(src); 2933e86fd7a7SAdrian Chadd 29346b349e5aSAdrian Chadd TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2935622b3fd2SSam Leffler dst->axq_link = src->axq_link; 2936622b3fd2SSam Leffler src->axq_link = NULL; 2937622b3fd2SSam Leffler dst->axq_depth += src->axq_depth; 29386edf1dc7SAdrian Chadd dst->axq_aggr_depth += src->axq_aggr_depth; 2939622b3fd2SSam Leffler src->axq_depth = 0; 29406edf1dc7SAdrian Chadd src->axq_aggr_depth = 0; 2941622b3fd2SSam Leffler } 2942622b3fd2SSam Leffler 2943622b3fd2SSam Leffler /* 2944c42a7b7eSSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates to the 2945c42a7b7eSSam Leffler * frame contents are done as needed and the slot time is 2946c42a7b7eSSam Leffler * also adjusted based on current state. 
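/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): why
 * the "(ast_be_xmit & 4)" test in ath_beacon_setup() above switches
 * the beacon antenna every four beacons.  Bit 2 of the transmit
 * counter flips once every four increments, so beacons 0-3 go out on
 * antenna 1, 4-7 on antenna 2, and so on.  beacon_antenna_sketch()
 * is a hypothetical helper.
 */
static int
beacon_antenna_sketch(unsigned int nbeacons_sent)
{
        return ((nbeacons_sent & 4) ? 2 : 1);
}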
2947c42a7b7eSSam Leffler */ 29485591b213SSam Leffler static void 29495591b213SSam Leffler ath_beacon_proc(void *arg, int pending) 29505591b213SSam Leffler { 29515591b213SSam Leffler struct ath_softc *sc = arg; 29525591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 2953b032f27cSSam Leffler struct ieee80211vap *vap; 2954b032f27cSSam Leffler struct ath_buf *bf; 2955b032f27cSSam Leffler int slot, otherant; 2956b032f27cSSam Leffler uint32_t bfaddr; 29575591b213SSam Leffler 2958c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2959c42a7b7eSSam Leffler __func__, pending); 2960c42a7b7eSSam Leffler /* 2961c42a7b7eSSam Leffler * Check if the previous beacon has gone out. If 2962c66c48cbSSam Leffler * not don't try to post another, skip this period 2963c66c48cbSSam Leffler * and wait for the next. Missed beacons indicate 2964c66c48cbSSam Leffler * a problem and should not occur. If we miss too 2965c66c48cbSSam Leffler * many consecutive beacons reset the device. 2966c42a7b7eSSam Leffler */ 2967c42a7b7eSSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2968c42a7b7eSSam Leffler sc->sc_bmisscount++; 29697ec4e6b8SAdrian Chadd sc->sc_stats.ast_be_missed++; 29704a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2971c42a7b7eSSam Leffler "%s: missed %u consecutive beacons\n", 2972c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2973a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold) 29740bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2975c42a7b7eSSam Leffler return; 2976c42a7b7eSSam Leffler } 2977c42a7b7eSSam Leffler if (sc->sc_bmisscount != 0) { 2978c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2979c42a7b7eSSam Leffler "%s: resume beacon xmit after %u misses\n", 2980c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2981c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 2982c42a7b7eSSam Leffler } 2983c42a7b7eSSam Leffler 2984b032f27cSSam Leffler if (sc->sc_stagbeacons) { /* staggered beacons */ 2985b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2986b032f27cSSam Leffler uint32_t tsftu; 2987b032f27cSSam Leffler 2988b032f27cSSam Leffler tsftu = ath_hal_gettsf32(ah) >> 10; 2989b032f27cSSam Leffler /* XXX lintval */ 2990b032f27cSSam Leffler slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; 2991b032f27cSSam Leffler vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; 2992b032f27cSSam Leffler bfaddr = 0; 2993309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2994b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 2995b032f27cSSam Leffler if (bf != NULL) 2996b032f27cSSam Leffler bfaddr = bf->bf_daddr; 2997b032f27cSSam Leffler } 2998b032f27cSSam Leffler } else { /* burst'd beacons */ 2999b032f27cSSam Leffler uint32_t *bflink = &bfaddr; 3000b032f27cSSam Leffler 3001b032f27cSSam Leffler for (slot = 0; slot < ATH_BCBUF; slot++) { 3002b032f27cSSam Leffler vap = sc->sc_bslot[slot]; 3003309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 3004b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 3005b032f27cSSam Leffler if (bf != NULL) { 3006b032f27cSSam Leffler *bflink = bf->bf_daddr; 3007b032f27cSSam Leffler bflink = &bf->bf_desc->ds_link; 3008c42a7b7eSSam Leffler } 3009c42a7b7eSSam Leffler } 3010b032f27cSSam Leffler } 3011b032f27cSSam Leffler *bflink = 0; /* terminate list */ 3012622b3fd2SSam Leffler } 3013c42a7b7eSSam Leffler 3014c42a7b7eSSam Leffler /* 3015c42a7b7eSSam Leffler * Handle slot time change when a non-ERP station joins/leaves 3016c42a7b7eSSam 
Leffler * an 11g network. The 802.11 layer notifies us via callback, 3017c42a7b7eSSam Leffler * we mark updateslot, then wait one beacon before effecting 3018c42a7b7eSSam Leffler * the change. This gives associated stations at least one 3019c42a7b7eSSam Leffler * beacon interval to note the state change. 3020c42a7b7eSSam Leffler */ 3021c42a7b7eSSam Leffler /* XXX locking */ 3022b032f27cSSam Leffler if (sc->sc_updateslot == UPDATE) { 3023c42a7b7eSSam Leffler sc->sc_updateslot = COMMIT; /* commit next beacon */ 3024b032f27cSSam Leffler sc->sc_slotupdate = slot; 3025b032f27cSSam Leffler } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 3026c42a7b7eSSam Leffler ath_setslottime(sc); /* commit change to h/w */ 3027c42a7b7eSSam Leffler 3028c42a7b7eSSam Leffler /* 3029c42a7b7eSSam Leffler * Check recent per-antenna transmit statistics and flip 3030c42a7b7eSSam Leffler * the default antenna if noticeably more frames went out 3031c42a7b7eSSam Leffler * on the non-default antenna. 3032c42a7b7eSSam Leffler * XXX assumes 2 anntenae 3033c42a7b7eSSam Leffler */ 3034b032f27cSSam Leffler if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) { 3035c42a7b7eSSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1; 3036c42a7b7eSSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 3037c42a7b7eSSam Leffler ath_setdefantenna(sc, otherant); 3038c42a7b7eSSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 3039b032f27cSSam Leffler } 3040c42a7b7eSSam Leffler 3041b032f27cSSam Leffler if (bfaddr != 0) { 3042c42a7b7eSSam Leffler /* 3043c42a7b7eSSam Leffler * Stop any current dma and put the new frame on the queue. 3044c42a7b7eSSam Leffler * This should never fail since we check above that no frames 3045c42a7b7eSSam Leffler * are still pending on the queue. 3046c42a7b7eSSam Leffler */ 30475591b213SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 3048c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3049c42a7b7eSSam Leffler "%s: beacon queue %u did not stop?\n", 3050c42a7b7eSSam Leffler __func__, sc->sc_bhalq); 30515591b213SSam Leffler } 3052b032f27cSSam Leffler /* NB: cabq traffic should already be queued and primed */ 3053b032f27cSSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); 3054b032f27cSSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 3055b032f27cSSam Leffler 3056b032f27cSSam Leffler sc->sc_stats.ast_be_xmit++; 3057b032f27cSSam Leffler } 3058b032f27cSSam Leffler } 3059b032f27cSSam Leffler 3060b032f27cSSam Leffler static struct ath_buf * 3061b032f27cSSam Leffler ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) 3062b032f27cSSam Leffler { 3063b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3064b032f27cSSam Leffler struct ath_txq *cabq = sc->sc_cabq; 3065b032f27cSSam Leffler struct ath_buf *bf; 3066b032f27cSSam Leffler struct mbuf *m; 3067b032f27cSSam Leffler int nmcastq, error; 3068b032f27cSSam Leffler 3069309a3e45SSam Leffler KASSERT(vap->iv_state >= IEEE80211_S_RUN, 3070b032f27cSSam Leffler ("not running, state %d", vap->iv_state)); 3071b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3072b032f27cSSam Leffler 3073b032f27cSSam Leffler /* 3074b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3075b032f27cSSam Leffler * non-zero then we need to remap the memory because 3076b032f27cSSam Leffler * the beacon frame changed size (probably because 3077b032f27cSSam Leffler * of the TIM bitmap). 
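/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): how
 * ath_beacon_proc() above maps the current TSF onto a staggered
 * beacon slot.  The TSF (in usec) is converted to TU, its offset
 * within the beacon interval is scaled to the number of slots, and
 * the following slot owns the SWBA being serviced.  For example,
 * intval = 100 TU and 4 slots with the TSF 30 TU into the interval
 * selects slot 1, so the vap in slot 2 is beaconed next.
 * stagger_slot_sketch() is a hypothetical helper.
 */
#include <stdint.h>

static int
stagger_slot_sketch(uint32_t tsf_usec, uint32_t intval_tu, int nslots)
{
        uint32_t tsftu = tsf_usec >> 10;        /* usec -> TU */
        int slot = ((tsftu % intval_tu) * nslots) / intval_tu;

        return ((slot + 1) % nslots);
}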
3078b032f27cSSam Leffler */ 3079b032f27cSSam Leffler bf = avp->av_bcbuf; 3080b032f27cSSam Leffler m = bf->bf_m; 308191d92caeSAdrian Chadd /* XXX lock mcastq? */ 3082b032f27cSSam Leffler nmcastq = avp->av_mcastq.axq_depth; 308391d92caeSAdrian Chadd 3084b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { 3085b032f27cSSam Leffler /* XXX too conservative? */ 3086b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3087b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3088b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3089b032f27cSSam Leffler BUS_DMA_NOWAIT); 3090b032f27cSSam Leffler if (error != 0) { 3091b032f27cSSam Leffler if_printf(vap->iv_ifp, 3092b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3093b032f27cSSam Leffler __func__, error); 3094b032f27cSSam Leffler return NULL; 3095b032f27cSSam Leffler } 3096b032f27cSSam Leffler } 3097b032f27cSSam Leffler if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { 3098b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 3099b032f27cSSam Leffler "%s: cabq did not drain, mcastq %u cabq %u\n", 3100b032f27cSSam Leffler __func__, nmcastq, cabq->axq_depth); 3101b032f27cSSam Leffler sc->sc_stats.ast_cabq_busy++; 3102b032f27cSSam Leffler if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { 3103b032f27cSSam Leffler /* 3104b032f27cSSam Leffler * CABQ traffic from a previous vap is still pending. 3105b032f27cSSam Leffler * We must drain the q before this beacon frame goes 3106b032f27cSSam Leffler * out as otherwise this vap's stations will get cab 3107b032f27cSSam Leffler * frames from a different vap. 3108b032f27cSSam Leffler * XXX could be slow causing us to miss DBA 3109b032f27cSSam Leffler */ 3110b032f27cSSam Leffler ath_tx_draintxq(sc, cabq); 3111b032f27cSSam Leffler } 3112b032f27cSSam Leffler } 3113b032f27cSSam Leffler ath_beacon_setup(sc, bf); 31145591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 31155591b213SSam Leffler 3116c42a7b7eSSam Leffler /* 3117c42a7b7eSSam Leffler * Enable the CAB queue before the beacon queue to 3118c42a7b7eSSam Leffler * insure cab frames are triggered by this beacon. 3119c42a7b7eSSam Leffler */ 3120b032f27cSSam Leffler if (avp->av_boff.bo_tim[4] & 1) { 3121b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3122b032f27cSSam Leffler 3123f3af83f7SSam Leffler /* NB: only at DTIM */ 3124622b3fd2SSam Leffler ATH_TXQ_LOCK(cabq); 3125b032f27cSSam Leffler ATH_TXQ_LOCK(&avp->av_mcastq); 3126622b3fd2SSam Leffler if (nmcastq) { 3127622b3fd2SSam Leffler struct ath_buf *bfm; 3128622b3fd2SSam Leffler 3129622b3fd2SSam Leffler /* 3130622b3fd2SSam Leffler * Move frames from the s/w mcast q to the h/w cab q. 3131b032f27cSSam Leffler * XXX MORE_DATA bit 3132622b3fd2SSam Leffler */ 31336b349e5aSAdrian Chadd bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q); 3134622b3fd2SSam Leffler if (cabq->axq_link != NULL) { 3135622b3fd2SSam Leffler *cabq->axq_link = bfm->bf_daddr; 3136622b3fd2SSam Leffler } else 3137622b3fd2SSam Leffler ath_hal_puttxbuf(ah, cabq->axq_qnum, 3138622b3fd2SSam Leffler bfm->bf_daddr); 3139b032f27cSSam Leffler ath_txqmove(cabq, &avp->av_mcastq); 3140622b3fd2SSam Leffler 3141622b3fd2SSam Leffler sc->sc_stats.ast_cabq_xmit += nmcastq; 3142622b3fd2SSam Leffler } 3143622b3fd2SSam Leffler /* NB: gated by beacon so safe to start here */ 31446b349e5aSAdrian Chadd if (! 
TAILQ_EMPTY(&(cabq->axq_q))) 3145622b3fd2SSam Leffler ath_hal_txstart(ah, cabq->axq_qnum); 3146b032f27cSSam Leffler ATH_TXQ_UNLOCK(&avp->av_mcastq); 31477b15790aSAdrian Chadd ATH_TXQ_UNLOCK(cabq); 3148622b3fd2SSam Leffler } 3149b032f27cSSam Leffler return bf; 3150b032f27cSSam Leffler } 3151b032f27cSSam Leffler 3152b032f27cSSam Leffler static void 3153b032f27cSSam Leffler ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) 3154b032f27cSSam Leffler { 3155b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3156b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3157b032f27cSSam Leffler struct ath_buf *bf; 3158b032f27cSSam Leffler struct mbuf *m; 3159b032f27cSSam Leffler int error; 3160b032f27cSSam Leffler 3161b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3162b032f27cSSam Leffler 3163b032f27cSSam Leffler /* 3164b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3165b032f27cSSam Leffler * non-zero then we need to remap the memory because 3166b032f27cSSam Leffler * the beacon frame changed size (probably because 3167b032f27cSSam Leffler * of the TIM bitmap). 3168b032f27cSSam Leffler */ 3169b032f27cSSam Leffler bf = avp->av_bcbuf; 3170b032f27cSSam Leffler m = bf->bf_m; 3171b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { 3172b032f27cSSam Leffler /* XXX too conservative? */ 3173b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3174b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3175b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3176b032f27cSSam Leffler BUS_DMA_NOWAIT); 3177b032f27cSSam Leffler if (error != 0) { 3178b032f27cSSam Leffler if_printf(vap->iv_ifp, 3179b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3180b032f27cSSam Leffler __func__, error); 3181b032f27cSSam Leffler return; 3182b032f27cSSam Leffler } 3183b032f27cSSam Leffler } 3184b032f27cSSam Leffler ath_beacon_setup(sc, bf); 3185b032f27cSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3186b032f27cSSam Leffler 3187b032f27cSSam Leffler /* NB: caller is known to have already stopped tx dma */ 31885591b213SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 31895591b213SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 31905591b213SSam Leffler } 31915591b213SSam Leffler 3192c42a7b7eSSam Leffler /* 3193d52f7132SAdrian Chadd * Reset the hardware, with no loss. 3194d52f7132SAdrian Chadd * 3195d52f7132SAdrian Chadd * This can't be used for a general case reset. 3196d52f7132SAdrian Chadd */ 3197d52f7132SAdrian Chadd static void 3198d52f7132SAdrian Chadd ath_reset_proc(void *arg, int pending) 3199d52f7132SAdrian Chadd { 3200d52f7132SAdrian Chadd struct ath_softc *sc = arg; 3201d52f7132SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 3202d52f7132SAdrian Chadd 3203d52f7132SAdrian Chadd #if 0 3204d52f7132SAdrian Chadd if_printf(ifp, "%s: resetting\n", __func__); 3205d52f7132SAdrian Chadd #endif 3206d52f7132SAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 3207d52f7132SAdrian Chadd } 3208d52f7132SAdrian Chadd 3209d52f7132SAdrian Chadd /* 3210c42a7b7eSSam Leffler * Reset the hardware after detecting beacons have stopped. 
3211c42a7b7eSSam Leffler */ 3212c42a7b7eSSam Leffler static void 3213c42a7b7eSSam Leffler ath_bstuck_proc(void *arg, int pending) 3214c42a7b7eSSam Leffler { 3215c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3216fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 321716d4de92SAdrian Chadd uint32_t hangs = 0; 321816d4de92SAdrian Chadd 321916d4de92SAdrian Chadd if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 322016d4de92SAdrian Chadd if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 3221c42a7b7eSSam Leffler 3222c42a7b7eSSam Leffler if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 3223c42a7b7eSSam Leffler sc->sc_bmisscount); 3224c2e34459SSam Leffler sc->sc_stats.ast_bstuck++; 322516d4de92SAdrian Chadd /* 322616d4de92SAdrian Chadd * This assumes that there's no simultaneous channel mode change 322716d4de92SAdrian Chadd * occuring. 322816d4de92SAdrian Chadd */ 3229517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 3230c42a7b7eSSam Leffler } 3231c42a7b7eSSam Leffler 3232c42a7b7eSSam Leffler /* 3233b032f27cSSam Leffler * Reclaim beacon resources and return buffer to the pool. 3234b032f27cSSam Leffler */ 3235b032f27cSSam Leffler static void 3236b032f27cSSam Leffler ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) 3237b032f27cSSam Leffler { 3238b032f27cSSam Leffler 32397ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 32407ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 3241b032f27cSSam Leffler if (bf->bf_m != NULL) { 3242b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3243b032f27cSSam Leffler m_freem(bf->bf_m); 3244b032f27cSSam Leffler bf->bf_m = NULL; 3245b032f27cSSam Leffler } 3246b032f27cSSam Leffler if (bf->bf_node != NULL) { 3247b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 3248b032f27cSSam Leffler bf->bf_node = NULL; 3249b032f27cSSam Leffler } 32506b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); 3251b032f27cSSam Leffler } 3252b032f27cSSam Leffler 3253b032f27cSSam Leffler /* 3254c42a7b7eSSam Leffler * Reclaim beacon resources. 3255c42a7b7eSSam Leffler */ 32565591b213SSam Leffler static void 32575591b213SSam Leffler ath_beacon_free(struct ath_softc *sc) 32585591b213SSam Leffler { 3259c42a7b7eSSam Leffler struct ath_buf *bf; 32605591b213SSam Leffler 32616b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 32627ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, 32637ebd03d7SAdrian Chadd "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 32647ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 32655591b213SSam Leffler if (bf->bf_m != NULL) { 32665591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 32675591b213SSam Leffler m_freem(bf->bf_m); 32685591b213SSam Leffler bf->bf_m = NULL; 3269f818612bSSam Leffler } 3270f818612bSSam Leffler if (bf->bf_node != NULL) { 3271f818612bSSam Leffler ieee80211_free_node(bf->bf_node); 32725591b213SSam Leffler bf->bf_node = NULL; 32735591b213SSam Leffler } 32745591b213SSam Leffler } 3275f818612bSSam Leffler } 32765591b213SSam Leffler 32775591b213SSam Leffler /* 32785591b213SSam Leffler * Configure the beacon and sleep timers. 32795591b213SSam Leffler * 32805591b213SSam Leffler * When operating as an AP this resets the TSF and sets 32815591b213SSam Leffler * up the hardware to notify us when we need to issue beacons. 
32825591b213SSam Leffler * 32835591b213SSam Leffler * When operating in station mode this sets up the beacon 32845591b213SSam Leffler * timers according to the timestamp of the last received 32855591b213SSam Leffler * beacon and the current TSF, configures PCF and DTIM 32865591b213SSam Leffler * handling, programs the sleep registers so the hardware 32875591b213SSam Leffler * will wakeup in time to receive beacons, and configures 32885591b213SSam Leffler * the beacon miss handling so we'll receive a BMISS 32895591b213SSam Leffler * interrupt when we stop seeing beacons from the AP 32905591b213SSam Leffler * we've associated with. 32915591b213SSam Leffler */ 32925591b213SSam Leffler static void 3293b032f27cSSam Leffler ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 32945591b213SSam Leffler { 329580d939bfSSam Leffler #define TSF_TO_TU(_h,_l) \ 329680d939bfSSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 329780d939bfSSam Leffler #define FUDGE 2 32985591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 3299b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3300b032f27cSSam Leffler struct ieee80211_node *ni; 330180d939bfSSam Leffler u_int32_t nexttbtt, intval, tsftu; 330280d939bfSSam Leffler u_int64_t tsf; 33035591b213SSam Leffler 3304b032f27cSSam Leffler if (vap == NULL) 3305b032f27cSSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 330680767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 3307b032f27cSSam Leffler 33088371372bSSam Leffler /* extract tstamp from last beacon and convert to TU */ 33098371372bSSam Leffler nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 33108371372bSSam Leffler LE_READ_4(ni->ni_tstamp.data)); 331159aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 331259aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3313b032f27cSSam Leffler /* 331459aa14a9SRui Paulo * For multi-bss ap/mesh support beacons are either staggered 3315b032f27cSSam Leffler * evenly over N slots or burst together. For the former 3316b032f27cSSam Leffler * arrange for the SWBA to be delivered for each slot. 3317b032f27cSSam Leffler * Slots that are not occupied will generate nothing. 3318b032f27cSSam Leffler */ 33198371372bSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 33204bacf7c1SSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3321b032f27cSSam Leffler if (sc->sc_stagbeacons) 3322b032f27cSSam Leffler intval /= ATH_BCBUF; 3323b032f27cSSam Leffler } else { 3324b032f27cSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 3325b032f27cSSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3326b032f27cSSam Leffler } 3327a6c992f4SSam Leffler if (nexttbtt == 0) /* e.g. for ap mode */ 3328a6c992f4SSam Leffler nexttbtt = intval; 3329a6c992f4SSam Leffler else if (intval) /* NB: can be 0 for monitor mode */ 3330a6c992f4SSam Leffler nexttbtt = roundup(nexttbtt, intval); 3331a6c992f4SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3332a6c992f4SSam Leffler __func__, nexttbtt, intval, ni->ni_intval); 3333b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 33345591b213SSam Leffler HAL_BEACON_STATE bs; 33358371372bSSam Leffler int dtimperiod, dtimcount; 33368371372bSSam Leffler int cfpperiod, cfpcount; 33375591b213SSam Leffler 33388371372bSSam Leffler /* 33398371372bSSam Leffler * Setup dtim and cfp parameters according to 33408371372bSSam Leffler * last beacon we received (which may be none). 
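/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): what
 * the TSF_TO_TU() macro above computes.  The 64-bit TSF counts
 * microseconds and one TU is 1024 usec, so the TU count is tsf >> 10;
 * expressed on the two 32-bit halves of the TSF that becomes
 * (hi << 22) | (lo >> 10), truncated to 32 bits.  tsf_to_tu_sketch()
 * is a hypothetical helper.
 */
#include <stdint.h>

static uint32_t
tsf_to_tu_sketch(uint64_t tsf)
{
        uint32_t hi = (uint32_t)(tsf >> 32);
        uint32_t lo = (uint32_t)tsf;

        return ((hi << 22) | (lo >> 10));       /* == (uint32_t)(tsf >> 10) */
}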
33418371372bSSam Leffler */ 33428371372bSSam Leffler dtimperiod = ni->ni_dtim_period; 33438371372bSSam Leffler if (dtimperiod <= 0) /* NB: 0 if not known */ 33448371372bSSam Leffler dtimperiod = 1; 33458371372bSSam Leffler dtimcount = ni->ni_dtim_count; 33468371372bSSam Leffler if (dtimcount >= dtimperiod) /* NB: sanity check */ 33478371372bSSam Leffler dtimcount = 0; /* XXX? */ 33488371372bSSam Leffler cfpperiod = 1; /* NB: no PCF support yet */ 33498371372bSSam Leffler cfpcount = 0; 33508371372bSSam Leffler /* 33518371372bSSam Leffler * Pull nexttbtt forward to reflect the current 33528371372bSSam Leffler * TSF and calculate dtim+cfp state for the result. 33538371372bSSam Leffler */ 33548371372bSSam Leffler tsf = ath_hal_gettsf64(ah); 335580d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 33568371372bSSam Leffler do { 33578371372bSSam Leffler nexttbtt += intval; 33588371372bSSam Leffler if (--dtimcount < 0) { 33598371372bSSam Leffler dtimcount = dtimperiod - 1; 33608371372bSSam Leffler if (--cfpcount < 0) 33618371372bSSam Leffler cfpcount = cfpperiod - 1; 33628371372bSSam Leffler } 33638371372bSSam Leffler } while (nexttbtt < tsftu); 33645591b213SSam Leffler memset(&bs, 0, sizeof(bs)); 3365a6c992f4SSam Leffler bs.bs_intval = intval; 33665591b213SSam Leffler bs.bs_nexttbtt = nexttbtt; 33678371372bSSam Leffler bs.bs_dtimperiod = dtimperiod*intval; 33688371372bSSam Leffler bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; 33698371372bSSam Leffler bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 33708371372bSSam Leffler bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 33718371372bSSam Leffler bs.bs_cfpmaxduration = 0; 33728371372bSSam Leffler #if 0 33735591b213SSam Leffler /* 3374c42a7b7eSSam Leffler * The 802.11 layer records the offset to the DTIM 3375c42a7b7eSSam Leffler * bitmap while receiving beacons; use it here to 3376c42a7b7eSSam Leffler * enable h/w detection of our AID being marked in 3377c42a7b7eSSam Leffler * the bitmap vector (to indicate frames for us are 3378c42a7b7eSSam Leffler * pending at the AP). 33798371372bSSam Leffler * XXX do DTIM handling in s/w to WAR old h/w bugs 33808371372bSSam Leffler * XXX enable based on h/w rev for newer chips 3381c42a7b7eSSam Leffler */ 3382c42a7b7eSSam Leffler bs.bs_timoffset = ni->ni_timoff; 33838371372bSSam Leffler #endif 3384c42a7b7eSSam Leffler /* 33855591b213SSam Leffler * Calculate the number of consecutive beacons to miss 338668e8e04eSSam Leffler * before taking a BMISS interrupt. 33875591b213SSam Leffler * Note that we clamp the result to at most 10 beacons. 33885591b213SSam Leffler */ 3389b032f27cSSam Leffler bs.bs_bmissthreshold = vap->iv_bmissthreshold; 33905591b213SSam Leffler if (bs.bs_bmissthreshold > 10) 33915591b213SSam Leffler bs.bs_bmissthreshold = 10; 33925591b213SSam Leffler else if (bs.bs_bmissthreshold <= 0) 33935591b213SSam Leffler bs.bs_bmissthreshold = 1; 33945591b213SSam Leffler 33955591b213SSam Leffler /* 33965591b213SSam Leffler * Calculate sleep duration. The configuration is 33975591b213SSam Leffler * given in ms. We insure a multiple of the beacon 33985591b213SSam Leffler * period is used. Also, if the sleep duration is 33995591b213SSam Leffler * greater than the DTIM period then it makes senses 34005591b213SSam Leffler * to make it a multiple of that. 
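/*
 * Illustrative sketch (editorial aside, not part of if_ath.c): the
 * "pull nexttbtt forward" loop in ath_beacon_config() above, with the
 * CFP bookkeeping omitted.  The target beacon time derived from the
 * last received beacon is advanced one interval at a time until it
 * lies in the future relative to the current TSF (in TU), while the
 * DTIM count tracks which upcoming beacon is a DTIM.
 * pull_tbtt_forward_sketch() is a hypothetical helper.
 */
#include <stdint.h>

static uint32_t
pull_tbtt_forward_sketch(uint32_t nexttbtt, uint32_t intval,
    uint32_t now_tu, int dtimperiod, int *dtimcount)
{
        do {
                nexttbtt += intval;
                if (--(*dtimcount) < 0)
                        *dtimcount = dtimperiod - 1;
        } while (nexttbtt < now_tu);
        return (nexttbtt);
}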
34015591b213SSam Leffler * 34025591b213SSam Leffler * XXX fixed at 100ms 34035591b213SSam Leffler */ 34044bacf7c1SSam Leffler bs.bs_sleepduration = 34054bacf7c1SSam Leffler roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); 34065591b213SSam Leffler if (bs.bs_sleepduration > bs.bs_dtimperiod) 34075591b213SSam Leffler bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 34085591b213SSam Leffler 3409c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 34108371372bSSam Leffler "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 34115591b213SSam Leffler , __func__ 34128371372bSSam Leffler , tsf, tsftu 34135591b213SSam Leffler , bs.bs_intval 34145591b213SSam Leffler , bs.bs_nexttbtt 34155591b213SSam Leffler , bs.bs_dtimperiod 34165591b213SSam Leffler , bs.bs_nextdtim 34175591b213SSam Leffler , bs.bs_bmissthreshold 34185591b213SSam Leffler , bs.bs_sleepduration 3419c42a7b7eSSam Leffler , bs.bs_cfpperiod 3420c42a7b7eSSam Leffler , bs.bs_cfpmaxduration 3421c42a7b7eSSam Leffler , bs.bs_cfpnext 3422c42a7b7eSSam Leffler , bs.bs_timoffset 3423c42a7b7eSSam Leffler ); 34245591b213SSam Leffler ath_hal_intrset(ah, 0); 3425c42a7b7eSSam Leffler ath_hal_beacontimers(ah, &bs); 34265591b213SSam Leffler sc->sc_imask |= HAL_INT_BMISS; 34275591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 34285591b213SSam Leffler } else { 34295591b213SSam Leffler ath_hal_intrset(ah, 0); 3430a6c992f4SSam Leffler if (nexttbtt == intval) 3431c42a7b7eSSam Leffler intval |= HAL_BEACON_RESET_TSF; 3432c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS) { 3433c42a7b7eSSam Leffler /* 3434c42a7b7eSSam Leffler * In IBSS mode enable the beacon timers but only 3435c42a7b7eSSam Leffler * enable SWBA interrupts if we need to manually 3436c42a7b7eSSam Leffler * prepare beacon frames. Otherwise we use a 3437c42a7b7eSSam Leffler * self-linked tx descriptor and let the hardware 3438c42a7b7eSSam Leffler * deal with things. 3439c42a7b7eSSam Leffler */ 3440c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 3441c42a7b7eSSam Leffler if (!sc->sc_hasveol) 3442c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_SWBA; 344380d939bfSSam Leffler if ((intval & HAL_BEACON_RESET_TSF) == 0) { 344480d939bfSSam Leffler /* 344580d939bfSSam Leffler * Pull nexttbtt forward to reflect 344680d939bfSSam Leffler * the current TSF. 344780d939bfSSam Leffler */ 344880d939bfSSam Leffler tsf = ath_hal_gettsf64(ah); 344980d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 345080d939bfSSam Leffler do { 345180d939bfSSam Leffler nexttbtt += intval; 345280d939bfSSam Leffler } while (nexttbtt < tsftu); 345380d939bfSSam Leffler } 34540f2e86fbSSam Leffler ath_beaconq_config(sc); 345559aa14a9SRui Paulo } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || 345659aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3457c42a7b7eSSam Leffler /* 345859aa14a9SRui Paulo * In AP/mesh mode we enable the beacon timers 345959aa14a9SRui Paulo * and SWBA interrupts to prepare beacon frames. 
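 *
 * (A note on the roundup() calls earlier in this routine, used for
 * both nexttbtt and the sleep duration: roundup(x, y) from
 * sys/param.h rounds x up to the next multiple of y.  A sketch of
 * the same arithmetic, with a hypothetical name:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
static u_int
example_roundup(u_int x, u_int y)
{
	/* e.g. example_roundup(250, 100) == 300, (200, 100) == 200. */
	return (((x + (y - 1)) / y) * y);
}
#endif
/*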
3460c42a7b7eSSam Leffler */ 3461c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 34625591b213SSam Leffler sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 34630f2e86fbSSam Leffler ath_beaconq_config(sc); 3464c42a7b7eSSam Leffler } 3465c42a7b7eSSam Leffler ath_hal_beaconinit(ah, nexttbtt, intval); 3466c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 34675591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 3468c42a7b7eSSam Leffler /* 3469c42a7b7eSSam Leffler * When using a self-linked beacon descriptor in 3470c42a7b7eSSam Leffler * ibss mode load it once here. 3471c42a7b7eSSam Leffler */ 3472c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3473b032f27cSSam Leffler ath_beacon_start_adhoc(sc, vap); 34745591b213SSam Leffler } 347580d939bfSSam Leffler sc->sc_syncbeacon = 0; 347680767531SAdrian Chadd ieee80211_free_node(ni); 347780d939bfSSam Leffler #undef FUDGE 34788371372bSSam Leffler #undef TSF_TO_TU 34795591b213SSam Leffler } 34805591b213SSam Leffler 34815591b213SSam Leffler static void 34825591b213SSam Leffler ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 34835591b213SSam Leffler { 34845591b213SSam Leffler bus_addr_t *paddr = (bus_addr_t*) arg; 3485d77367bfSSam Leffler KASSERT(error == 0, ("error %u on bus_dma callback", error)); 34865591b213SSam Leffler *paddr = segs->ds_addr; 34875591b213SSam Leffler } 34885591b213SSam Leffler 34895591b213SSam Leffler static int 3490c42a7b7eSSam Leffler ath_descdma_setup(struct ath_softc *sc, 3491c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head, 3492c42a7b7eSSam Leffler const char *name, int nbuf, int ndesc) 3493c42a7b7eSSam Leffler { 3494c42a7b7eSSam Leffler #define DS2PHYS(_dd, _ds) \ 3495c42a7b7eSSam Leffler ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 349645abcd6cSAdrian Chadd #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 349745abcd6cSAdrian Chadd ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3498fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 349945abcd6cSAdrian Chadd uint8_t *ds; 3500c42a7b7eSSam Leffler struct ath_buf *bf; 3501c42a7b7eSSam Leffler int i, bsize, error; 350245abcd6cSAdrian Chadd int desc_len; 350345abcd6cSAdrian Chadd 350445abcd6cSAdrian Chadd desc_len = sizeof(struct ath_desc); 3505c42a7b7eSSam Leffler 3506c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3507c42a7b7eSSam Leffler __func__, name, nbuf, ndesc); 3508c42a7b7eSSam Leffler 3509c42a7b7eSSam Leffler dd->dd_name = name; 351045abcd6cSAdrian Chadd dd->dd_desc_len = desc_len * nbuf * ndesc; 351145abcd6cSAdrian Chadd 351245abcd6cSAdrian Chadd /* 351345abcd6cSAdrian Chadd * Merlin work-around: 351445abcd6cSAdrian Chadd * Descriptors that cross the 4KB boundary can't be used. 351545abcd6cSAdrian Chadd * Assume one skipped descriptor per 4KB page. 351645abcd6cSAdrian Chadd */ 351745abcd6cSAdrian Chadd if (! ath_hal_split4ktrans(sc->sc_ah)) { 351845abcd6cSAdrian Chadd int numdescpage = 4096 / (desc_len * ndesc); 351945abcd6cSAdrian Chadd dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 352045abcd6cSAdrian Chadd } 3521c42a7b7eSSam Leffler 3522c42a7b7eSSam Leffler /* 3523c42a7b7eSSam Leffler * Setup DMA descriptor area. 
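 *
 * (On the Merlin work-around above: the 4KB rule has two parts, a
 * sizing step here and a skip step in the population loop below.
 * The arithmetic, as a standalone sketch with local names:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
/* True if a descriptor of 'len' bytes at 'daddr' would cross 4KB. */
static int
example_crosses_4kb(u_int32_t daddr, int len)
{
	return (((daddr & 0xFFF) > (0x1000 - len)) ? 1 : 0);
}

/* Bytes to allocate when each ndesc-chain must stay within a page. */
static int
example_descdma_bytes(int nbuf, int ndesc, int desc_len)
{
	int perpage = 4096 / (desc_len * ndesc);

	return ((nbuf / perpage + 1) * 4096);
}
#endif
/*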
3524c42a7b7eSSam Leffler */ 3525c2175ff5SMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3526c42a7b7eSSam Leffler PAGE_SIZE, 0, /* alignment, bounds */ 3527c42a7b7eSSam Leffler BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3528c42a7b7eSSam Leffler BUS_SPACE_MAXADDR, /* highaddr */ 3529c42a7b7eSSam Leffler NULL, NULL, /* filter, filterarg */ 3530c42a7b7eSSam Leffler dd->dd_desc_len, /* maxsize */ 3531c42a7b7eSSam Leffler 1, /* nsegments */ 35326ccb8ea7SSam Leffler dd->dd_desc_len, /* maxsegsize */ 3533c42a7b7eSSam Leffler BUS_DMA_ALLOCNOW, /* flags */ 3534c42a7b7eSSam Leffler NULL, /* lockfunc */ 3535c42a7b7eSSam Leffler NULL, /* lockarg */ 3536c42a7b7eSSam Leffler &dd->dd_dmat); 3537c42a7b7eSSam Leffler if (error != 0) { 3538c42a7b7eSSam Leffler if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3539c42a7b7eSSam Leffler return error; 3540c42a7b7eSSam Leffler } 3541c42a7b7eSSam Leffler 3542c42a7b7eSSam Leffler /* allocate descriptors */ 3543c42a7b7eSSam Leffler error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 3544c42a7b7eSSam Leffler if (error != 0) { 3545c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s descriptors, " 3546c42a7b7eSSam Leffler "error %u\n", dd->dd_name, error); 3547c42a7b7eSSam Leffler goto fail0; 3548c42a7b7eSSam Leffler } 3549c42a7b7eSSam Leffler 3550c42a7b7eSSam Leffler error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 35510553a01fSSam Leffler BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 35520553a01fSSam Leffler &dd->dd_dmamap); 3553c42a7b7eSSam Leffler if (error != 0) { 3554c42a7b7eSSam Leffler if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3555c42a7b7eSSam Leffler "error %u\n", nbuf * ndesc, dd->dd_name, error); 3556c42a7b7eSSam Leffler goto fail1; 3557c42a7b7eSSam Leffler } 3558c42a7b7eSSam Leffler 3559c42a7b7eSSam Leffler error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3560c42a7b7eSSam Leffler dd->dd_desc, dd->dd_desc_len, 3561c42a7b7eSSam Leffler ath_load_cb, &dd->dd_desc_paddr, 3562c42a7b7eSSam Leffler BUS_DMA_NOWAIT); 3563c42a7b7eSSam Leffler if (error != 0) { 3564c42a7b7eSSam Leffler if_printf(ifp, "unable to map %s descriptors, error %u\n", 3565c42a7b7eSSam Leffler dd->dd_name, error); 3566c42a7b7eSSam Leffler goto fail2; 3567c42a7b7eSSam Leffler } 3568c42a7b7eSSam Leffler 356945abcd6cSAdrian Chadd ds = (uint8_t *) dd->dd_desc; 3570c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3571c42a7b7eSSam Leffler __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 3572c42a7b7eSSam Leffler (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 3573c42a7b7eSSam Leffler 3574ebecf802SSam Leffler /* allocate rx buffers */ 3575c42a7b7eSSam Leffler bsize = sizeof(struct ath_buf) * nbuf; 3576c42a7b7eSSam Leffler bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3577c42a7b7eSSam Leffler if (bf == NULL) { 3578c42a7b7eSSam Leffler if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3579c42a7b7eSSam Leffler dd->dd_name, bsize); 3580c42a7b7eSSam Leffler goto fail3; 3581c42a7b7eSSam Leffler } 3582c42a7b7eSSam Leffler dd->dd_bufptr = bf; 3583c42a7b7eSSam Leffler 35846b349e5aSAdrian Chadd TAILQ_INIT(head); 358545abcd6cSAdrian Chadd for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 358645abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 3587c42a7b7eSSam Leffler bf->bf_daddr = DS2PHYS(dd, ds); 358845abcd6cSAdrian Chadd if (! 
ath_hal_split4ktrans(sc->sc_ah)) { 358945abcd6cSAdrian Chadd /* 359045abcd6cSAdrian Chadd * Merlin WAR: Skip descriptor addresses which 359145abcd6cSAdrian Chadd * cause 4KB boundary crossing along any point 359245abcd6cSAdrian Chadd * in the descriptor. 359345abcd6cSAdrian Chadd */ 359445abcd6cSAdrian Chadd if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 359545abcd6cSAdrian Chadd desc_len * ndesc)) { 359645abcd6cSAdrian Chadd /* Start at the next page */ 359745abcd6cSAdrian Chadd ds += 0x1000 - (bf->bf_daddr & 0xFFF); 359845abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 359945abcd6cSAdrian Chadd bf->bf_daddr = DS2PHYS(dd, ds); 360045abcd6cSAdrian Chadd } 360145abcd6cSAdrian Chadd } 3602c42a7b7eSSam Leffler error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3603c42a7b7eSSam Leffler &bf->bf_dmamap); 3604c42a7b7eSSam Leffler if (error != 0) { 3605c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s " 3606c42a7b7eSSam Leffler "buffer %u, error %u\n", dd->dd_name, i, error); 3607c42a7b7eSSam Leffler ath_descdma_cleanup(sc, dd, head); 3608c42a7b7eSSam Leffler return error; 3609c42a7b7eSSam Leffler } 36106edf1dc7SAdrian Chadd bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 36116b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(head, bf, bf_list); 3612c42a7b7eSSam Leffler } 3613c42a7b7eSSam Leffler return 0; 3614c42a7b7eSSam Leffler fail3: 3615c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3616c42a7b7eSSam Leffler fail2: 3617c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3618c42a7b7eSSam Leffler fail1: 3619c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3620c42a7b7eSSam Leffler fail0: 3621c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3622c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3623c42a7b7eSSam Leffler return error; 3624c42a7b7eSSam Leffler #undef DS2PHYS 362545abcd6cSAdrian Chadd #undef ATH_DESC_4KB_BOUND_CHECK 3626c42a7b7eSSam Leffler } 3627c42a7b7eSSam Leffler 3628c42a7b7eSSam Leffler static void 3629c42a7b7eSSam Leffler ath_descdma_cleanup(struct ath_softc *sc, 3630c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head) 3631c42a7b7eSSam Leffler { 3632c42a7b7eSSam Leffler struct ath_buf *bf; 3633c42a7b7eSSam Leffler struct ieee80211_node *ni; 3634c42a7b7eSSam Leffler 3635c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3636c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3637c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3638c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3639c42a7b7eSSam Leffler 36406b349e5aSAdrian Chadd TAILQ_FOREACH(bf, head, bf_list) { 3641c42a7b7eSSam Leffler if (bf->bf_m) { 3642c42a7b7eSSam Leffler m_freem(bf->bf_m); 3643c42a7b7eSSam Leffler bf->bf_m = NULL; 3644c42a7b7eSSam Leffler } 3645c42a7b7eSSam Leffler if (bf->bf_dmamap != NULL) { 3646c42a7b7eSSam Leffler bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3647c42a7b7eSSam Leffler bf->bf_dmamap = NULL; 3648c42a7b7eSSam Leffler } 3649c42a7b7eSSam Leffler ni = bf->bf_node; 3650c42a7b7eSSam Leffler bf->bf_node = NULL; 3651c42a7b7eSSam Leffler if (ni != NULL) { 3652c42a7b7eSSam Leffler /* 3653c42a7b7eSSam Leffler * Reclaim node reference. 
3654c42a7b7eSSam Leffler */ 3655c42a7b7eSSam Leffler ieee80211_free_node(ni); 3656c42a7b7eSSam Leffler } 3657c42a7b7eSSam Leffler } 3658c42a7b7eSSam Leffler 36596b349e5aSAdrian Chadd TAILQ_INIT(head); 3660c42a7b7eSSam Leffler free(dd->dd_bufptr, M_ATHDEV); 3661c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3662c42a7b7eSSam Leffler } 3663c42a7b7eSSam Leffler 3664c42a7b7eSSam Leffler static int 36655591b213SSam Leffler ath_desc_alloc(struct ath_softc *sc) 36665591b213SSam Leffler { 3667c42a7b7eSSam Leffler int error; 36685591b213SSam Leffler 3669c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3670e2d787faSSam Leffler "rx", ath_rxbuf, 1); 36715591b213SSam Leffler if (error != 0) 36725591b213SSam Leffler return error; 36735591b213SSam Leffler 3674c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3675e2d787faSSam Leffler "tx", ath_txbuf, ATH_TXDESC); 3676c42a7b7eSSam Leffler if (error != 0) { 3677c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 36785591b213SSam Leffler return error; 3679c42a7b7eSSam Leffler } 3680c42a7b7eSSam Leffler 3681c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3682b032f27cSSam Leffler "beacon", ATH_BCBUF, 1); 3683c42a7b7eSSam Leffler if (error != 0) { 3684c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3685c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3686c42a7b7eSSam Leffler return error; 3687c42a7b7eSSam Leffler } 36885591b213SSam Leffler return 0; 36895591b213SSam Leffler } 36905591b213SSam Leffler 36915591b213SSam Leffler static void 36925591b213SSam Leffler ath_desc_free(struct ath_softc *sc) 36935591b213SSam Leffler { 36945591b213SSam Leffler 3695c42a7b7eSSam Leffler if (sc->sc_bdma.dd_desc_len != 0) 3696c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3697c42a7b7eSSam Leffler if (sc->sc_txdma.dd_desc_len != 0) 3698c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3699c42a7b7eSSam Leffler if (sc->sc_rxdma.dd_desc_len != 0) 3700c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 37015591b213SSam Leffler } 37025591b213SSam Leffler 37035591b213SSam Leffler static struct ieee80211_node * 370438c208f8SSam Leffler ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 37055591b213SSam Leffler { 370638c208f8SSam Leffler struct ieee80211com *ic = vap->iv_ic; 3707c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 3708c42a7b7eSSam Leffler const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3709c42a7b7eSSam Leffler struct ath_node *an; 3710c42a7b7eSSam Leffler 3711c42a7b7eSSam Leffler an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3712c42a7b7eSSam Leffler if (an == NULL) { 3713c42a7b7eSSam Leffler /* XXX stat+msg */ 3714de5af704SSam Leffler return NULL; 37155591b213SSam Leffler } 3716c42a7b7eSSam Leffler ath_rate_node_init(sc, an); 37175591b213SSam Leffler 37183dd85b26SAdrian Chadd /* Setup the mutex - there's no associd yet so set the name to NULL */ 37193dd85b26SAdrian Chadd snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 37203dd85b26SAdrian Chadd device_get_nameunit(sc->sc_dev), an); 37213dd85b26SAdrian Chadd mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 37223dd85b26SAdrian Chadd 3723eb6f0de0SAdrian Chadd /* XXX setup ath_tid */ 3724eb6f0de0SAdrian Chadd ath_tx_tid_init(sc, an); 3725eb6f0de0SAdrian Chadd 3726c42a7b7eSSam Leffler DPRINTF(sc, 
ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3727c42a7b7eSSam Leffler return &an->an_node; 3728c42a7b7eSSam Leffler } 3729c42a7b7eSSam Leffler 37305591b213SSam Leffler static void 37314afa805eSAdrian Chadd ath_node_cleanup(struct ieee80211_node *ni) 37324afa805eSAdrian Chadd { 37334afa805eSAdrian Chadd struct ieee80211com *ic = ni->ni_ic; 37344afa805eSAdrian Chadd struct ath_softc *sc = ic->ic_ifp->if_softc; 37354afa805eSAdrian Chadd 37364afa805eSAdrian Chadd /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3737eb6f0de0SAdrian Chadd ath_tx_node_flush(sc, ATH_NODE(ni)); 37384afa805eSAdrian Chadd ath_rate_node_cleanup(sc, ATH_NODE(ni)); 37394afa805eSAdrian Chadd sc->sc_node_cleanup(ni); 37404afa805eSAdrian Chadd } 37414afa805eSAdrian Chadd 37424afa805eSAdrian Chadd static void 3743c42a7b7eSSam Leffler ath_node_free(struct ieee80211_node *ni) 37445591b213SSam Leffler { 3745c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 3746c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 37471e774079SSam Leffler 3748c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 37493dd85b26SAdrian Chadd mtx_destroy(&ATH_NODE(ni)->an_mtx); 3750c42a7b7eSSam Leffler sc->sc_node_free(ni); 37515591b213SSam Leffler } 37525591b213SSam Leffler 375368e8e04eSSam Leffler static void 375468e8e04eSSam Leffler ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 375568e8e04eSSam Leffler { 375668e8e04eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 375768e8e04eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 375868e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 375968e8e04eSSam Leffler 3760b032f27cSSam Leffler *rssi = ic->ic_node_getrssi(ni); 376159efa8b5SSam Leffler if (ni->ni_chan != IEEE80211_CHAN_ANYC) 376259efa8b5SSam Leffler *noise = ath_hal_getchannoise(ah, ni->ni_chan); 376359efa8b5SSam Leffler else 376468e8e04eSSam Leffler *noise = -95; /* nominally correct */ 376568e8e04eSSam Leffler } 376668e8e04eSSam Leffler 37675591b213SSam Leffler static int 37685591b213SSam Leffler ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 37695591b213SSam Leffler { 37705591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 37715591b213SSam Leffler int error; 37725591b213SSam Leffler struct mbuf *m; 37735591b213SSam Leffler struct ath_desc *ds; 37745591b213SSam Leffler 37755591b213SSam Leffler m = bf->bf_m; 37765591b213SSam Leffler if (m == NULL) { 37775591b213SSam Leffler /* 37785591b213SSam Leffler * NB: by assigning a page to the rx dma buffer we 37795591b213SSam Leffler * implicitly satisfy the Atheros requirement that 37805591b213SSam Leffler * this buffer be cache-line-aligned and sized to be 37815591b213SSam Leffler * multiple of the cache line size. Not doing this 37825591b213SSam Leffler * causes weird stuff to happen (for the 5210 at least). 
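 *
 * (Spelled out, the constraint being satisfied implicitly here is
 * that the buffer start and length are both whole multiples of the
 * cache line size; any power-of-two cluster at least one line long
 * gives that for free.  A throwaway check, with names of our own:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
static int
example_rxbuf_cacheline_ok(uintptr_t addr, size_t len, size_t linesz)
{
	return (((addr & (linesz - 1)) == 0) &&
	    ((len & (linesz - 1)) == 0));
}
#endif
/*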
37835591b213SSam Leffler */ 37845591b213SSam Leffler m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 37855591b213SSam Leffler if (m == NULL) { 3786c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3787c42a7b7eSSam Leffler "%s: no mbuf/cluster\n", __func__); 37885591b213SSam Leffler sc->sc_stats.ast_rx_nombuf++; 37895591b213SSam Leffler return ENOMEM; 37905591b213SSam Leffler } 37915591b213SSam Leffler m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 37925591b213SSam Leffler 3793f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3794c42a7b7eSSam Leffler bf->bf_dmamap, m, 3795f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 37965591b213SSam Leffler BUS_DMA_NOWAIT); 37975591b213SSam Leffler if (error != 0) { 3798c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3799f9e6219bSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3800c42a7b7eSSam Leffler __func__, error); 38015591b213SSam Leffler sc->sc_stats.ast_rx_busdma++; 3802b2792ff6SSam Leffler m_freem(m); 38035591b213SSam Leffler return error; 38045591b213SSam Leffler } 3805d77367bfSSam Leffler KASSERT(bf->bf_nseg == 1, 3806d77367bfSSam Leffler ("multi-segment packet; nseg %u", bf->bf_nseg)); 3807b2792ff6SSam Leffler bf->bf_m = m; 38085591b213SSam Leffler } 38095591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 38105591b213SSam Leffler 381104e22a02SSam Leffler /* 381204e22a02SSam Leffler * Setup descriptors. For receive we always terminate 381304e22a02SSam Leffler * the descriptor list with a self-linked entry so we'll 381404e22a02SSam Leffler * not get overrun under high load (as can happen with a 3815c42a7b7eSSam Leffler * 5212 when ANI processing enables PHY error frames). 381604e22a02SSam Leffler * 381704e22a02SSam Leffler * To insure the last descriptor is self-linked we create 381804e22a02SSam Leffler * each descriptor as self-linked and add it to the end. As 381904e22a02SSam Leffler * each additional descriptor is added the previous self-linked 382004e22a02SSam Leffler * entry is ``fixed'' naturally. This should be safe even 382104e22a02SSam Leffler * if DMA is happening. When processing RX interrupts we 382204e22a02SSam Leffler * never remove/process the last, self-linked, entry on the 382304e22a02SSam Leffler * descriptor list. This insures the hardware always has 382404e22a02SSam Leffler * someplace to write a new frame. 382504e22a02SSam Leffler */ 38268a2a6beeSAdrian Chadd /* 38278a2a6beeSAdrian Chadd * 11N: we can no longer afford to self link the last descriptor. 38288a2a6beeSAdrian Chadd * MAC acknowledges BA status as long as it copies frames to host 38298a2a6beeSAdrian Chadd * buffer (or rx fifo). This can incorrectly acknowledge packets 38308a2a6beeSAdrian Chadd * to a sender if last desc is self-linked. 
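 *
 * (The append trick described above, in isolation: every descriptor
 * is created pointing at itself, and linking the previous tail to
 * the newcomer "fixes" the old self-link, so the chain always ends
 * in a parked, self-linked entry.  A toy sketch; the structure and
 * names below are ours, not the hardware's:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
struct toy_desc {
	u_int32_t	td_link;	/* phys addr of next descriptor */
	u_int32_t	td_daddr;	/* phys addr of this descriptor */
};

static void
toy_append(struct toy_desc **tailp, struct toy_desc *ds, u_int32_t daddr)
{
	ds->td_daddr = daddr;
	ds->td_link = daddr;			/* self-linked on creation */
	if (*tailp != NULL)
		(*tailp)->td_link = daddr;	/* fix the old tail */
	*tailp = ds;
}
#endif
/*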
38318a2a6beeSAdrian Chadd */ 38325591b213SSam Leffler ds = bf->bf_desc; 38338a2a6beeSAdrian Chadd if (sc->sc_rxslink) 383404e22a02SSam Leffler ds->ds_link = bf->bf_daddr; /* link to self */ 38358a2a6beeSAdrian Chadd else 38368a2a6beeSAdrian Chadd ds->ds_link = 0; /* terminate the list */ 38375591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 38385591b213SSam Leffler ath_hal_setuprxdesc(ah, ds 38395591b213SSam Leffler , m->m_len /* buffer size */ 38405591b213SSam Leffler , 0 38415591b213SSam Leffler ); 38425591b213SSam Leffler 38435591b213SSam Leffler if (sc->sc_rxlink != NULL) 38445591b213SSam Leffler *sc->sc_rxlink = bf->bf_daddr; 38455591b213SSam Leffler sc->sc_rxlink = &ds->ds_link; 38465591b213SSam Leffler return 0; 38475591b213SSam Leffler } 38485591b213SSam Leffler 3849c42a7b7eSSam Leffler /* 385003ed599aSSam Leffler * Extend 15-bit time stamp from rx descriptor to 38517b0c77ecSSam Leffler * a full 64-bit TSF using the specified TSF. 385203ed599aSSam Leffler */ 385303ed599aSSam Leffler static __inline u_int64_t 3854fc4de9b7SAdrian Chadd ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf) 385503ed599aSSam Leffler { 385603ed599aSSam Leffler if ((tsf & 0x7fff) < rstamp) 385703ed599aSSam Leffler tsf -= 0x8000; 3858fc4de9b7SAdrian Chadd 385903ed599aSSam Leffler return ((tsf &~ 0x7fff) | rstamp); 386003ed599aSSam Leffler } 386103ed599aSSam Leffler 386203ed599aSSam Leffler /* 3863fc4de9b7SAdrian Chadd * Extend 32-bit time stamp from rx descriptor to 3864fc4de9b7SAdrian Chadd * a full 64-bit TSF using the specified TSF. 3865fc4de9b7SAdrian Chadd */ 3866fc4de9b7SAdrian Chadd static __inline u_int64_t 3867fc4de9b7SAdrian Chadd ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf) 3868fc4de9b7SAdrian Chadd { 3869fc4de9b7SAdrian Chadd u_int32_t tsf_low = tsf & 0xffffffff; 3870fc4de9b7SAdrian Chadd u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp; 3871fc4de9b7SAdrian Chadd 3872fc4de9b7SAdrian Chadd if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000)) 3873fc4de9b7SAdrian Chadd tsf64 -= 0x100000000ULL; 3874fc4de9b7SAdrian Chadd 3875fc4de9b7SAdrian Chadd if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000)) 3876fc4de9b7SAdrian Chadd tsf64 += 0x100000000ULL; 3877fc4de9b7SAdrian Chadd 3878fc4de9b7SAdrian Chadd return tsf64; 3879fc4de9b7SAdrian Chadd } 3880fc4de9b7SAdrian Chadd 3881fc4de9b7SAdrian Chadd /* 3882fc4de9b7SAdrian Chadd * Extend the TSF from the RX descriptor to a full 64 bit TSF. 3883fc4de9b7SAdrian Chadd * Earlier hardware versions only wrote the low 15 bits of the 3884fc4de9b7SAdrian Chadd * TSF into the RX descriptor; later versions (AR5416 and up) 3885fc4de9b7SAdrian Chadd * include the 32 bit TSF value. 3886fc4de9b7SAdrian Chadd */ 3887fc4de9b7SAdrian Chadd static __inline u_int64_t 3888fc4de9b7SAdrian Chadd ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf) 3889fc4de9b7SAdrian Chadd { 3890fc4de9b7SAdrian Chadd if (sc->sc_rxtsf32) 3891fc4de9b7SAdrian Chadd return ath_extend_tsf32(rstamp, tsf); 3892fc4de9b7SAdrian Chadd else 3893fc4de9b7SAdrian Chadd return ath_extend_tsf15(rstamp, tsf); 3894fc4de9b7SAdrian Chadd } 3895fc4de9b7SAdrian Chadd 3896fc4de9b7SAdrian Chadd /* 3897c42a7b7eSSam Leffler * Intercept management frames to collect beacon rssi data 3898c42a7b7eSSam Leffler * and to do ibss merges. 
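 *
 * The ibss merge below compares the peer's beacon timestamp against
 * our own TSF; the 15/32-bit rx timestamp is first widened with
 * ath_extend_tsf() above.  A worked example of the 15-bit case:
 * with tsf = 0x10008003 and rstamp = 0x7ff0, (tsf & 0x7fff) = 3 is
 * already smaller than rstamp, i.e. the low 15 bits wrapped after
 * the frame was stamped, so tsf is stepped back by 0x8000 to
 * 0x10000003 and spliced to give (0x10000000 | 0x7ff0) = 0x10007ff0,
 * a moment just before the wrap as expected.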
3899c42a7b7eSSam Leffler */
3900c42a7b7eSSam Leffler static void
3901b032f27cSSam Leffler ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
39025463c4a4SSam Leffler int subtype, int rssi, int nf)
3903c42a7b7eSSam Leffler {
3904b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap;
3905b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3906c42a7b7eSSam Leffler 
3907c42a7b7eSSam Leffler /*
3908c42a7b7eSSam Leffler * Call up first so subsequent work can use information
3909c42a7b7eSSam Leffler * potentially stored in the node (e.g. for ibss merge).
3910c42a7b7eSSam Leffler */
39115463c4a4SSam Leffler ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3912c42a7b7eSSam Leffler switch (subtype) {
3913c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_BEACON:
3914c42a7b7eSSam Leffler /* update rssi statistics for use by the hal */
391580767531SAdrian Chadd /* XXX unlocked check against vap->iv_bss? */
3916ffa2cab6SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
391780d939bfSSam Leffler if (sc->sc_syncbeacon &&
3918b032f27cSSam Leffler ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
391980d939bfSSam Leffler /*
392080d939bfSSam Leffler * Resync beacon timers using the tsf of the beacon
392180d939bfSSam Leffler * frame we just received.
392280d939bfSSam Leffler */
3923b032f27cSSam Leffler ath_beacon_config(sc, vap);
392480d939bfSSam Leffler }
3925c42a7b7eSSam Leffler /* fall thru... */
3926c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3927b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS &&
3928b032f27cSSam Leffler vap->iv_state == IEEE80211_S_RUN) {
39297041d50cSBernhard Schmidt uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3930fc4de9b7SAdrian Chadd uint64_t tsf = ath_extend_tsf(sc, rstamp,
39317b0c77ecSSam Leffler ath_hal_gettsf64(sc->sc_ah));
3932c42a7b7eSSam Leffler /*
3933c42a7b7eSSam Leffler * Handle ibss merge as needed; check the tsf on the
3934c42a7b7eSSam Leffler * frame before attempting the merge. The 802.11 spec
3935c42a7b7eSSam Leffler * says the station should change its bssid to match
3936c42a7b7eSSam Leffler * the oldest station with the same ssid, where oldest
3937f818612bSSam Leffler * is determined by the tsf. Note that hardware
3938f818612bSSam Leffler * reconfiguration happens through callback to
393903ed599aSSam Leffler * ath_newstate as the state machine will go from
394003ed599aSSam Leffler * RUN -> RUN when this happens.
3941c42a7b7eSSam Leffler */
394203ed599aSSam Leffler if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
394303ed599aSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE,
394433d7d80cSTai-hwa Liang "ibss merge, rstamp %u tsf %ju "
394533d7d80cSTai-hwa Liang "tstamp %ju\n", rstamp, (uintmax_t)tsf,
394633d7d80cSTai-hwa Liang (uintmax_t)ni->ni_tstamp.tsf);
3947641b4d0bSSam Leffler (void) ieee80211_ibss_merge(ni);
3948c42a7b7eSSam Leffler }
394903ed599aSSam Leffler }
3950c42a7b7eSSam Leffler break;
3951c42a7b7eSSam Leffler }
3952c42a7b7eSSam Leffler }
3953c42a7b7eSSam Leffler 
3954c42a7b7eSSam Leffler /*
3955c42a7b7eSSam Leffler * Set the default antenna.
3956c42a7b7eSSam Leffler */ 3957c42a7b7eSSam Leffler static void 3958c42a7b7eSSam Leffler ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3959c42a7b7eSSam Leffler { 3960c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3961c42a7b7eSSam Leffler 3962c42a7b7eSSam Leffler /* XXX block beacon interrupts */ 3963c42a7b7eSSam Leffler ath_hal_setdefantenna(ah, antenna); 3964c42a7b7eSSam Leffler if (sc->sc_defant != antenna) 3965c42a7b7eSSam Leffler sc->sc_stats.ast_ant_defswitch++; 3966c42a7b7eSSam Leffler sc->sc_defant = antenna; 3967c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 3968c42a7b7eSSam Leffler } 3969c42a7b7eSSam Leffler 39705463c4a4SSam Leffler static void 3971b032f27cSSam Leffler ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 397265f9edeeSSam Leffler const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 39737b0c77ecSSam Leffler { 3974e387d629SSam Leffler #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3975e387d629SSam Leffler #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3976e387d629SSam Leffler #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 397746d4d74cSSam Leffler #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3978b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 397946d4d74cSSam Leffler const HAL_RATE_TABLE *rt; 398046d4d74cSSam Leffler uint8_t rix; 39817b0c77ecSSam Leffler 398246d4d74cSSam Leffler rt = sc->sc_currates; 398346d4d74cSSam Leffler KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 398446d4d74cSSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate]; 398568e8e04eSSam Leffler sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 39867b0c77ecSSam Leffler sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 398746d4d74cSSam Leffler #ifdef AH_SUPPORT_AR5416 3988e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 398946d4d74cSSam Leffler if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 399059efa8b5SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 399159efa8b5SSam Leffler 3992e387d629SSam Leffler if ((rs->rs_flags & HAL_RX_2040) == 0) 3993e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 399459efa8b5SSam Leffler else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 3995e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3996e387d629SSam Leffler else 3997e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 399868e8e04eSSam Leffler if ((rs->rs_flags & HAL_RX_GI) == 0) 3999e387d629SSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 400068e8e04eSSam Leffler } 400168e8e04eSSam Leffler #endif 4002fc4de9b7SAdrian Chadd sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 400365f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 40047b0c77ecSSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 40057b0c77ecSSam Leffler /* XXX propagate other error flags from descriptor */ 40067b0c77ecSSam Leffler sc->sc_rx_th.wr_antnoise = nf; 40075463c4a4SSam Leffler sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 400865f9edeeSSam Leffler sc->sc_rx_th.wr_antenna = rs->rs_antenna; 400946d4d74cSSam Leffler #undef CHAN_HT 4010e387d629SSam Leffler #undef CHAN_HT20 4011e387d629SSam Leffler #undef CHAN_HT40U 4012e387d629SSam Leffler #undef CHAN_HT40D 40137b0c77ecSSam Leffler } 40147b0c77ecSSam Leffler 40155591b213SSam Leffler static void 4016b032f27cSSam Leffler ath_handle_micerror(struct ieee80211com *ic, 4017b032f27cSSam Leffler struct ieee80211_frame *wh, int keyix) 4018b032f27cSSam Leffler { 4019b032f27cSSam Leffler struct ieee80211_node *ni; 4020b032f27cSSam Leffler 
4021b032f27cSSam Leffler /* XXX recheck MIC to deal w/ chips that lie */ 4022b032f27cSSam Leffler /* XXX discard MIC errors on !data frames */ 4023b032f27cSSam Leffler ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 4024b032f27cSSam Leffler if (ni != NULL) { 4025b032f27cSSam Leffler ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 4026b032f27cSSam Leffler ieee80211_free_node(ni); 4027b032f27cSSam Leffler } 4028b032f27cSSam Leffler } 4029b032f27cSSam Leffler 403096ff485dSAdrian Chadd /* 403196ff485dSAdrian Chadd * Only run the RX proc if it's not already running. 403296ff485dSAdrian Chadd * Since this may get run as part of the reset/flush path, 403396ff485dSAdrian Chadd * the task can't clash with an existing, running tasklet. 403496ff485dSAdrian Chadd */ 4035b032f27cSSam Leffler static void 403696ff485dSAdrian Chadd ath_rx_tasklet(void *arg, int npending) 403796ff485dSAdrian Chadd { 403896ff485dSAdrian Chadd struct ath_softc *sc = arg; 403996ff485dSAdrian Chadd 404096ff485dSAdrian Chadd CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 404196ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 4042ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4043ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 4044ef27340cSAdrian Chadd device_printf(sc->sc_dev, 4045ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4046ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4047ef27340cSAdrian Chadd return; 4048ef27340cSAdrian Chadd } 4049ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 405096ff485dSAdrian Chadd ath_rx_proc(sc, 1); 405196ff485dSAdrian Chadd } 405296ff485dSAdrian Chadd 405396ff485dSAdrian Chadd static void 405496ff485dSAdrian Chadd ath_rx_proc(struct ath_softc *sc, int resched) 40555591b213SSam Leffler { 40568cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 4057c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 4058c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 40595591b213SSam Leffler struct ath_buf *bf; 4060fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 4061b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 40625591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 40635591b213SSam Leffler struct ath_desc *ds; 406465f9edeeSSam Leffler struct ath_rx_status *rs; 40655591b213SSam Leffler struct mbuf *m; 40660a915fadSSam Leffler struct ieee80211_node *ni; 4067d7736e13SSam Leffler int len, type, ngood; 40685591b213SSam Leffler HAL_STATUS status; 40697b0c77ecSSam Leffler int16_t nf; 407006fc4a10SAdrian Chadd u_int64_t tsf, rstamp; 40718f939e79SAdrian Chadd int npkts = 0; 40725591b213SSam Leffler 4073ef27340cSAdrian Chadd /* XXX we must not hold the ATH_LOCK here */ 4074ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 4075ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 4076ef27340cSAdrian Chadd 4077ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4078ef27340cSAdrian Chadd sc->sc_rxproc_cnt++; 4079ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4080ef27340cSAdrian Chadd 408196ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 4082d7736e13SSam Leffler ngood = 0; 408359efa8b5SSam Leffler nf = ath_hal_getchannoise(ah, sc->sc_curchan); 408484784be1SSam Leffler sc->sc_stats.ast_rx_noise = nf; 40857b0c77ecSSam Leffler tsf = ath_hal_gettsf64(ah); 40865591b213SSam Leffler do { 40876b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 40888a2a6beeSAdrian Chadd if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4089c42a7b7eSSam Leffler if_printf(ifp, 
"%s: no buffer!\n", __func__); 40905591b213SSam Leffler break; 40918a2a6beeSAdrian Chadd } else if (bf == NULL) { 40928a2a6beeSAdrian Chadd /* 40938a2a6beeSAdrian Chadd * End of List: 40948a2a6beeSAdrian Chadd * this can happen for non-self-linked RX chains 40958a2a6beeSAdrian Chadd */ 40968a2a6beeSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 40978a2a6beeSAdrian Chadd break; 40985591b213SSam Leffler } 4099b2792ff6SSam Leffler m = bf->bf_m; 4100b2792ff6SSam Leffler if (m == NULL) { /* NB: shouldn't happen */ 4101b2792ff6SSam Leffler /* 4102b2792ff6SSam Leffler * If mbuf allocation failed previously there 4103b2792ff6SSam Leffler * will be no mbuf; try again to re-populate it. 4104b2792ff6SSam Leffler */ 4105b2792ff6SSam Leffler /* XXX make debug msg */ 4106b2792ff6SSam Leffler if_printf(ifp, "%s: no mbuf!\n", __func__); 41076b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4108b2792ff6SSam Leffler goto rx_next; 4109b2792ff6SSam Leffler } 411004e22a02SSam Leffler ds = bf->bf_desc; 411104e22a02SSam Leffler if (ds->ds_link == bf->bf_daddr) { 411204e22a02SSam Leffler /* NB: never process the self-linked entry at the end */ 4113f77057dbSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 411404e22a02SSam Leffler break; 411504e22a02SSam Leffler } 41168cec0ab9SSam Leffler /* XXX sync descriptor memory */ 41178cec0ab9SSam Leffler /* 41188cec0ab9SSam Leffler * Must provide the virtual address of the current 41198cec0ab9SSam Leffler * descriptor, the physical address, and the virtual 41208cec0ab9SSam Leffler * address of the next descriptor in the h/w chain. 41218cec0ab9SSam Leffler * This allows the HAL to look ahead to see if the 41228cec0ab9SSam Leffler * hardware is done with a descriptor by checking the 41238cec0ab9SSam Leffler * done bit in the following descriptor and the address 41248cec0ab9SSam Leffler * of the current descriptor the DMA engine is working 41258cec0ab9SSam Leffler * on. All this is necessary because of our use of 41268cec0ab9SSam Leffler * a self-linked list to avoid rx overruns. 41278cec0ab9SSam Leffler */ 412865f9edeeSSam Leffler rs = &bf->bf_status.ds_rxstat; 41298cec0ab9SSam Leffler status = ath_hal_rxprocdesc(ah, ds, 413065f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4131a585a9a1SSam Leffler #ifdef ATH_DEBUG 4132c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 41336902009eSSam Leffler ath_printrxbuf(sc, bf, 0, status == HAL_OK); 41345591b213SSam Leffler #endif 41355591b213SSam Leffler if (status == HAL_EINPROGRESS) 41365591b213SSam Leffler break; 41376b349e5aSAdrian Chadd 41386b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 41398f939e79SAdrian Chadd npkts++; 4140f9aa1d90SAdrian Chadd 414106fc4a10SAdrian Chadd /* 414206fc4a10SAdrian Chadd * Calculate the correct 64 bit TSF given 414306fc4a10SAdrian Chadd * the TSF64 register value and rs_tstamp. 
414406fc4a10SAdrian Chadd */ 414506fc4a10SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 414606fc4a10SAdrian Chadd 4147f9aa1d90SAdrian Chadd /* These aren't specifically errors */ 41486e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4149f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_GI) 4150f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_halfgi++; 4151f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_2040) 4152f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_2040++; 4153f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4154f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_pre_crc_err++; 4155f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4156f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_post_crc_err++; 4157f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4158f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_decrypt_busy_err++; 4159f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4160f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_hi_rx_chain++; 41616e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4162f9aa1d90SAdrian Chadd 416368e8e04eSSam Leffler if (rs->rs_status != 0) { 416465f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 41655591b213SSam Leffler sc->sc_stats.ast_rx_crcerr++; 416665f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_FIFO) 41675591b213SSam Leffler sc->sc_stats.ast_rx_fifoerr++; 416865f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_PHY) { 41695591b213SSam Leffler sc->sc_stats.ast_rx_phyerr++; 417048237774SAdrian Chadd /* Process DFS radar events */ 4171373815efSAdrian Chadd if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4172373815efSAdrian Chadd (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4173373815efSAdrian Chadd /* Since we're touching the frame data, sync it */ 4174373815efSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, 4175373815efSAdrian Chadd bf->bf_dmamap, 4176373815efSAdrian Chadd BUS_DMASYNC_POSTREAD); 4177373815efSAdrian Chadd /* Now pass it to the radar processing code */ 417806fc4a10SAdrian Chadd ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4179373815efSAdrian Chadd } 418048237774SAdrian Chadd 4181f9aa1d90SAdrian Chadd /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 4182f9aa1d90SAdrian Chadd if (rs->rs_phyerr < 64) 4183f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 418468e8e04eSSam Leffler goto rx_error; /* NB: don't count in ierrors */ 4185c42a7b7eSSam Leffler } 418665f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_DECRYPT) { 418785643802SSam Leffler /* 4188c42a7b7eSSam Leffler * Decrypt error. If the error occurred 4189c42a7b7eSSam Leffler * because there was no hardware key, then 4190c42a7b7eSSam Leffler * let the frame through so the upper layers 4191c42a7b7eSSam Leffler * can process it. This is necessary for 5210 4192c42a7b7eSSam Leffler * parts which have no way to setup a ``clear'' 4193c42a7b7eSSam Leffler * key cache entry. 4194c42a7b7eSSam Leffler * 4195c42a7b7eSSam Leffler * XXX do key cache faulting 419685643802SSam Leffler */ 419765f9edeeSSam Leffler if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4198c42a7b7eSSam Leffler goto rx_accept; 4199c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badcrypt++; 42005591b213SSam Leffler } 420165f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_MIC) { 4202c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badmic++; 4203c42a7b7eSSam Leffler /* 4204c42a7b7eSSam Leffler * Do minimal work required to hand off 42055463c4a4SSam Leffler * the 802.11 header for notification. 
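 *
 * (Stepping back to the phy error path above for a moment: it shows
 * two habits worth calling out -- sync the DMA'd frame before the
 * CPU reads it, and never use a hardware-supplied error code as an
 * array index without a bounds check.  A sketch using the driver's
 * own fields, with a name of our own:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
static void
example_count_phyerr(struct ath_softc *sc, struct ath_buf *bf, u_int phyerr)
{
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
	if (phyerr < 64)		/* matches the guard above */
		sc->sc_stats.ast_rx_phy[phyerr]++;
}
#endif
/*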
4206c42a7b7eSSam Leffler */
4207c42a7b7eSSam Leffler /* XXX frag's and qos frames */
420865f9edeeSSam Leffler len = rs->rs_datalen;
4209c42a7b7eSSam Leffler if (len >= sizeof (struct ieee80211_frame)) {
4210c42a7b7eSSam Leffler bus_dmamap_sync(sc->sc_dmat,
4211c42a7b7eSSam Leffler bf->bf_dmamap,
4212c42a7b7eSSam Leffler BUS_DMASYNC_POSTREAD);
4213b032f27cSSam Leffler ath_handle_micerror(ic,
4214c42a7b7eSSam Leffler mtod(m, struct ieee80211_frame *),
42150ab4040aSSam Leffler sc->sc_splitmic ?
4216b032f27cSSam Leffler rs->rs_keyix-32 : rs->rs_keyix);
4217c42a7b7eSSam Leffler }
4218c42a7b7eSSam Leffler }
4219c42a7b7eSSam Leffler ifp->if_ierrors++;
422068e8e04eSSam Leffler rx_error:
422168e8e04eSSam Leffler /*
422268e8e04eSSam Leffler * Cleanup any pending partial frame.
422368e8e04eSSam Leffler */
422468e8e04eSSam Leffler if (sc->sc_rxpending != NULL) {
422568e8e04eSSam Leffler m_freem(sc->sc_rxpending);
422668e8e04eSSam Leffler sc->sc_rxpending = NULL;
422768e8e04eSSam Leffler }
4228c42a7b7eSSam Leffler /*
42297b0c77ecSSam Leffler * When a tap is present pass error frames
42307b0c77ecSSam Leffler * that have been requested. By default we
42317b0c77ecSSam Leffler * pass decrypt+mic errors but others may be
42327b0c77ecSSam Leffler * interesting (e.g. crc).
4233c42a7b7eSSam Leffler */
42345463c4a4SSam Leffler if (ieee80211_radiotap_active(ic) &&
423565f9edeeSSam Leffler (rs->rs_status & sc->sc_monpass)) {
42367b0c77ecSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
42377b0c77ecSSam Leffler BUS_DMASYNC_POSTREAD);
42387b0c77ecSSam Leffler /* NB: bpf needs the mbuf length setup */
423965f9edeeSSam Leffler len = rs->rs_datalen;
42407b0c77ecSSam Leffler m->m_pkthdr.len = m->m_len = len;
4241dcfd99a7SAdrian Chadd bf->bf_m = NULL;
424206fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf);
42435463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m);
4244dcfd99a7SAdrian Chadd m_freem(m);
42457b0c77ecSSam Leffler }
42467b0c77ecSSam Leffler /* XXX pass MIC errors up for s/w recalculation */
42475591b213SSam Leffler goto rx_next;
42485591b213SSam Leffler }
4249c42a7b7eSSam Leffler rx_accept:
4250c42a7b7eSSam Leffler /*
4251c42a7b7eSSam Leffler * Sync and unmap the frame. At this point we're
4252c42a7b7eSSam Leffler * committed to passing the mbuf somewhere so clear
4253c66c48cbSSam Leffler * bf_m; this means a new mbuf must be allocated
4254c42a7b7eSSam Leffler * when the rx descriptor is setup again to receive
4255c42a7b7eSSam Leffler * another frame.
4256c42a7b7eSSam Leffler */
42575591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
42585591b213SSam Leffler BUS_DMASYNC_POSTREAD);
42595591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
42605591b213SSam Leffler bf->bf_m = NULL;
4261c42a7b7eSSam Leffler 
426265f9edeeSSam Leffler len = rs->rs_datalen;
426368e8e04eSSam Leffler m->m_len = len;
426468e8e04eSSam Leffler 
426568e8e04eSSam Leffler if (rs->rs_more) {
426668e8e04eSSam Leffler /*
426768e8e04eSSam Leffler * Frame spans multiple descriptors; save
426868e8e04eSSam Leffler * it for the next completed descriptor, it
426968e8e04eSSam Leffler * will be used to construct a jumbogram.
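 *
 * (The rxpending handling below reduces to a small state machine:
 * park the first half, chain the second half to it, or hand a
 * normal frame straight through.  A minimal sketch -- the function
 * name is ours, and the "drop a stale pending frame" case of the
 * real code is left out:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
static struct mbuf *
example_rx_reassemble(struct mbuf **pending, struct mbuf *m, int len,
    int more)
{
	m->m_len = len;
	if (more) {				/* first half: stash it */
		m->m_pkthdr.len = len;
		*pending = m;
		return (NULL);
	}
	if (*pending != NULL) {			/* second half: chain it */
		(*pending)->m_next = m;
		(*pending)->m_pkthdr.len += len;
		m = *pending;
		*pending = NULL;
		return (m);
	}
	m->m_pkthdr.len = len;			/* ordinary frame */
	return (m);
}
#endif
/*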
427068e8e04eSSam Leffler */
427168e8e04eSSam Leffler if (sc->sc_rxpending != NULL) {
427268e8e04eSSam Leffler /* NB: max frame size is currently 2 clusters */
427368e8e04eSSam Leffler sc->sc_stats.ast_rx_toobig++;
427468e8e04eSSam Leffler m_freem(sc->sc_rxpending);
427568e8e04eSSam Leffler }
427668e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp;
427768e8e04eSSam Leffler m->m_pkthdr.len = len;
427868e8e04eSSam Leffler sc->sc_rxpending = m;
427968e8e04eSSam Leffler goto rx_next;
428068e8e04eSSam Leffler } else if (sc->sc_rxpending != NULL) {
428168e8e04eSSam Leffler /*
428268e8e04eSSam Leffler * This is the second part of a jumbogram,
428368e8e04eSSam Leffler * chain it to the first mbuf, adjust the
428468e8e04eSSam Leffler * frame length, and clear the rxpending state.
428568e8e04eSSam Leffler */
428668e8e04eSSam Leffler sc->sc_rxpending->m_next = m;
428768e8e04eSSam Leffler sc->sc_rxpending->m_pkthdr.len += len;
428868e8e04eSSam Leffler m = sc->sc_rxpending;
428968e8e04eSSam Leffler sc->sc_rxpending = NULL;
429068e8e04eSSam Leffler } else {
429168e8e04eSSam Leffler /*
429268e8e04eSSam Leffler * Normal single-descriptor receive; setup
429368e8e04eSSam Leffler * the rcvif and packet length.
429468e8e04eSSam Leffler */
429568e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp;
429668e8e04eSSam Leffler m->m_pkthdr.len = len;
429768e8e04eSSam Leffler }
429873454c73SSam Leffler 
4299197d53c5SAdrian Chadd /*
4300197d53c5SAdrian Chadd * Validate rs->rs_antenna.
4301197d53c5SAdrian Chadd *
4302197d53c5SAdrian Chadd * Some users w/ AR9285 NICs have reported crashes
4303197d53c5SAdrian Chadd * here because rs_antenna field is bogusly large.
4304197d53c5SAdrian Chadd * Let's enforce the maximum antenna limit of 8
4305197d53c5SAdrian Chadd * (and it shouldn't be hard coded, but that's a
4306197d53c5SAdrian Chadd * separate problem) and if there's an issue, print
4307197d53c5SAdrian Chadd * out an error and adjust rs_antenna to something
4308197d53c5SAdrian Chadd * sensible.
4309197d53c5SAdrian Chadd *
4310197d53c5SAdrian Chadd * This code should be removed once the actual
4311197d53c5SAdrian Chadd * root cause of the issue has been identified.
4312197d53c5SAdrian Chadd * For example, it may be that the rs_antenna
4313197d53c5SAdrian Chadd * field is only valid for the last frame of
4314197d53c5SAdrian Chadd * an aggregate and it just happens that it is
4315197d53c5SAdrian Chadd * "mostly" right. (This is a general statement -
4316197d53c5SAdrian Chadd * the majority of the statistics are only valid
4317197d53c5SAdrian Chadd * for the last frame in an aggregate.)
4318197d53c5SAdrian Chadd */
4319197d53c5SAdrian Chadd if (rs->rs_antenna > 7) {
4320197d53c5SAdrian Chadd device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
4321197d53c5SAdrian Chadd __func__, rs->rs_antenna);
4322197d53c5SAdrian Chadd #ifdef ATH_DEBUG
4323197d53c5SAdrian Chadd ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4324197d53c5SAdrian Chadd #endif /* ATH_DEBUG */
4325197d53c5SAdrian Chadd rs->rs_antenna = 0; /* XXX better than nothing */
4326197d53c5SAdrian Chadd }
4327197d53c5SAdrian Chadd 
4328b032f27cSSam Leffler ifp->if_ipackets++;
432965f9edeeSSam Leffler sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4330c42a7b7eSSam Leffler 
43315463c4a4SSam Leffler /*
43325463c4a4SSam Leffler * Populate the rx status block. When there are bpf
43335463c4a4SSam Leffler * listeners we do the additional work to provide
43345463c4a4SSam Leffler * complete status. Otherwise we fill in only the
43355463c4a4SSam Leffler * material required by ieee80211_input.
Note that 43365463c4a4SSam Leffler * noise setting is filled in above. 43375463c4a4SSam Leffler */ 43385463c4a4SSam Leffler if (ieee80211_radiotap_active(ic)) 433906fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf); 43400a915fadSSam Leffler 43415591b213SSam Leffler /* 4342c42a7b7eSSam Leffler * From this point on we assume the frame is at least 4343c42a7b7eSSam Leffler * as large as ieee80211_frame_min; verify that. 43445591b213SSam Leffler */ 4345c42a7b7eSSam Leffler if (len < IEEE80211_MIN_LEN) { 43465463c4a4SSam Leffler if (!ieee80211_radiotap_active(ic)) { 43475463c4a4SSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 43485463c4a4SSam Leffler "%s: short packet %d\n", __func__, len); 4349c42a7b7eSSam Leffler sc->sc_stats.ast_rx_tooshort++; 43505463c4a4SSam Leffler } else { 43515463c4a4SSam Leffler /* NB: in particular this captures ack's */ 43525463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m); 43535463c4a4SSam Leffler } 4354c42a7b7eSSam Leffler m_freem(m); 4355c42a7b7eSSam Leffler goto rx_next; 43565591b213SSam Leffler } 43570a915fadSSam Leffler 4358c42a7b7eSSam Leffler if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 435946d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 436046d4d74cSSam Leffler uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 436146d4d74cSSam Leffler 436268e8e04eSSam Leffler ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 436346d4d74cSSam Leffler sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4364c42a7b7eSSam Leffler } 4365c42a7b7eSSam Leffler 4366c42a7b7eSSam Leffler m_adj(m, -IEEE80211_CRC_LEN); 4367de5af704SSam Leffler 4368de5af704SSam Leffler /* 4369c42a7b7eSSam Leffler * Locate the node for sender, track state, and then 4370c42a7b7eSSam Leffler * pass the (referenced) node up to the 802.11 layer 4371c42a7b7eSSam Leffler * for its use. 4372c42a7b7eSSam Leffler */ 4373c1225b52SSam Leffler ni = ieee80211_find_rxnode_withkey(ic, 4374c1225b52SSam Leffler mtod(m, const struct ieee80211_frame_min *), 437565f9edeeSSam Leffler rs->rs_keyix == HAL_RXKEYIX_INVALID ? 437665f9edeeSSam Leffler IEEE80211_KEYIX_NONE : rs->rs_keyix); 43777041d50cSBernhard Schmidt sc->sc_lastrs = rs; 4378a07e9ddbSAdrian Chadd 43796e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4380a07e9ddbSAdrian Chadd if (rs->rs_isaggr) 4381a07e9ddbSAdrian Chadd sc->sc_stats.ast_rx_agg++; 43826e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4383a07e9ddbSAdrian Chadd 4384a07e9ddbSAdrian Chadd if (ni != NULL) { 4385b032f27cSSam Leffler /* 4386e57539afSAdrian Chadd * Only punt packets for ampdu reorder processing for 4387e57539afSAdrian Chadd * 11n nodes; net80211 enforces that M_AMPDU is only 4388e57539afSAdrian Chadd * set for 11n nodes. 438900fc8705SAdrian Chadd */ 439000fc8705SAdrian Chadd if (ni->ni_flags & IEEE80211_NODE_HT) 439100fc8705SAdrian Chadd m->m_flags |= M_AMPDU; 439200fc8705SAdrian Chadd 439300fc8705SAdrian Chadd /* 4394b032f27cSSam Leffler * Sending station is known, dispatch directly. 4395b032f27cSSam Leffler */ 43965463c4a4SSam Leffler type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4397b032f27cSSam Leffler ieee80211_free_node(ni); 4398b032f27cSSam Leffler /* 4399b032f27cSSam Leffler * Arrange to update the last rx timestamp only for 4400b032f27cSSam Leffler * frames from our ap when operating in station mode. 4401b032f27cSSam Leffler * This assumes the rx key is always setup when 4402b032f27cSSam Leffler * associated. 
4403b032f27cSSam Leffler */ 4404b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && 4405b032f27cSSam Leffler rs->rs_keyix != HAL_RXKEYIX_INVALID) 4406b032f27cSSam Leffler ngood++; 4407b032f27cSSam Leffler } else { 44085463c4a4SSam Leffler type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4409b032f27cSSam Leffler } 4410c42a7b7eSSam Leffler /* 4411c42a7b7eSSam Leffler * Track rx rssi and do any rx antenna management. 4412de5af704SSam Leffler */ 441365f9edeeSSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4414c42a7b7eSSam Leffler if (sc->sc_diversity) { 4415c42a7b7eSSam Leffler /* 4416c42a7b7eSSam Leffler * When using fast diversity, change the default rx 4417c42a7b7eSSam Leffler * antenna if diversity chooses the other antenna 3 4418c42a7b7eSSam Leffler * times in a row. 4419c42a7b7eSSam Leffler */ 442065f9edeeSSam Leffler if (sc->sc_defant != rs->rs_antenna) { 4421c42a7b7eSSam Leffler if (++sc->sc_rxotherant >= 3) 442265f9edeeSSam Leffler ath_setdefantenna(sc, rs->rs_antenna); 4423c42a7b7eSSam Leffler } else 4424c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 4425c42a7b7eSSam Leffler } 4426235ab70eSAdrian Chadd 4427235ab70eSAdrian Chadd /* Newer school diversity - kite specific for now */ 4428235ab70eSAdrian Chadd /* XXX perhaps migrate the normal diversity code to this? */ 4429235ab70eSAdrian Chadd if ((ah)->ah_rxAntCombDiversity) 4430235ab70eSAdrian Chadd (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4431235ab70eSAdrian Chadd 44323e50ec2cSSam Leffler if (sc->sc_softled) { 44333e50ec2cSSam Leffler /* 44343e50ec2cSSam Leffler * Blink for any data frame. Otherwise do a 44353e50ec2cSSam Leffler * heartbeat-style blink when idle. The latter 44363e50ec2cSSam Leffler * is mainly for station mode where we depend on 44373e50ec2cSSam Leffler * periodic beacon frames to trigger the poll event. 44383e50ec2cSSam Leffler */ 443931640eb7SSam Leffler if (type == IEEE80211_FC0_TYPE_DATA) { 444046d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 444146d4d74cSSam Leffler ath_led_event(sc, 444246d4d74cSSam Leffler rt->rateCodeToIndex[rs->rs_rate]); 44433e50ec2cSSam Leffler } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 444446d4d74cSSam Leffler ath_led_event(sc, 0); 44453e50ec2cSSam Leffler } 44465591b213SSam Leffler rx_next: 44476b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 44485591b213SSam Leffler } while (ath_rxbuf_init(sc, bf) == 0); 44495591b213SSam Leffler 4450c42a7b7eSSam Leffler /* rx signal state monitoring */ 445159efa8b5SSam Leffler ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4452d7736e13SSam Leffler if (ngood) 4453d7736e13SSam Leffler sc->sc_lastrx = tsf; 4454b5f4adb3SSam Leffler 4455f52d3452SAdrian Chadd CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 445648237774SAdrian Chadd /* Queue DFS tasklet if needed */ 445796ff485dSAdrian Chadd if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 445848237774SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 445948237774SAdrian Chadd 44601fdadc0fSAdrian Chadd /* 44611fdadc0fSAdrian Chadd * Now that all the RX frames were handled that 44621fdadc0fSAdrian Chadd * need to be handled, kick the PCU if there's 44631fdadc0fSAdrian Chadd * been an RXEOL condition. 
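 *
 * (An aside on the slow-diversity logic a little further up: the
 * default antenna is switched only after three consecutive frames
 * arrive on the other antenna.  Isolated, with local names:)
 */
#if 0	/* Illustrative sketch only; not part of the driver. */
static u_int
example_pick_defant(u_int defant, u_int rxant, int *otherant)
{
	if (defant != rxant) {
		if (++(*otherant) >= 3) {
			*otherant = 0;	/* as ath_setdefantenna() does */
			return (rxant);	/* switch default antenna */
		}
	} else
		*otherant = 0;
	return (defant);
}
#endif
/*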
44641fdadc0fSAdrian Chadd */ 4465ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 446696ff485dSAdrian Chadd if (resched && sc->sc_kickpcu) { 4467f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 44688f939e79SAdrian Chadd device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 44698f939e79SAdrian Chadd __func__, npkts); 44708f939e79SAdrian Chadd 44718f939e79SAdrian Chadd /* XXX rxslink? */ 4472ef27340cSAdrian Chadd /* 4473ef27340cSAdrian Chadd * XXX can we hold the PCU lock here? 4474ef27340cSAdrian Chadd * Are there any net80211 buffer calls involved? 4475ef27340cSAdrian Chadd */ 44768f939e79SAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 44778f939e79SAdrian Chadd ath_hal_putrxbuf(ah, bf->bf_daddr); 44788f939e79SAdrian Chadd ath_hal_rxena(ah); /* enable recv descriptors */ 44798f939e79SAdrian Chadd ath_mode_init(sc); /* set filters, etc. */ 44808f939e79SAdrian Chadd ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 44818f939e79SAdrian Chadd 44821fdadc0fSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 44838f939e79SAdrian Chadd sc->sc_kickpcu = 0; 44841fdadc0fSAdrian Chadd } 4485ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 44861fdadc0fSAdrian Chadd 4487ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 448896ff485dSAdrian Chadd if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 4489339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 449004f19fd6SSam Leffler ieee80211_ff_age_all(ic, 100); 4491339ccfb3SSam Leffler #endif 4492339ccfb3SSam Leffler if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4493cd196bb2SSam Leffler ath_start(ifp); 4494339ccfb3SSam Leffler } 44958cec0ab9SSam Leffler #undef PA2DESC 4496ef27340cSAdrian Chadd 4497ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4498ef27340cSAdrian Chadd sc->sc_rxproc_cnt--; 4499ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 45005591b213SSam Leffler } 45015591b213SSam Leffler 4502622b3fd2SSam Leffler static void 4503622b3fd2SSam Leffler ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4504622b3fd2SSam Leffler { 4505622b3fd2SSam Leffler txq->axq_qnum = qnum; 4506339ccfb3SSam Leffler txq->axq_ac = 0; 4507622b3fd2SSam Leffler txq->axq_depth = 0; 450816d4de92SAdrian Chadd txq->axq_aggr_depth = 0; 4509622b3fd2SSam Leffler txq->axq_intrcnt = 0; 4510622b3fd2SSam Leffler txq->axq_link = NULL; 45116b349e5aSAdrian Chadd txq->axq_softc = sc; 45126b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_q); 45136b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_tidq); 4514622b3fd2SSam Leffler ATH_TXQ_LOCK_INIT(sc, txq); 4515622b3fd2SSam Leffler } 4516622b3fd2SSam Leffler 45175591b213SSam Leffler /* 4518c42a7b7eSSam Leffler * Setup a h/w transmit queue. 45195591b213SSam Leffler */ 4520c42a7b7eSSam Leffler static struct ath_txq * 4521c42a7b7eSSam Leffler ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4522c42a7b7eSSam Leffler { 4523c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4524c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4525c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4526c42a7b7eSSam Leffler int qnum; 4527c42a7b7eSSam Leffler 4528c42a7b7eSSam Leffler memset(&qi, 0, sizeof(qi)); 4529c42a7b7eSSam Leffler qi.tqi_subtype = subtype; 4530c42a7b7eSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4531c42a7b7eSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4532c42a7b7eSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4533c42a7b7eSSam Leffler /* 4534c42a7b7eSSam Leffler * Enable interrupts only for EOL and DESC conditions. 
4535c42a7b7eSSam Leffler * We mark tx descriptors to receive a DESC interrupt 4536c42a7b7eSSam Leffler * when a tx queue gets deep; otherwise waiting for the 4537c42a7b7eSSam Leffler * EOL to reap descriptors. Note that this is done to 4538c42a7b7eSSam Leffler * reduce interrupt load and this only defers reaping 4539c42a7b7eSSam Leffler * descriptors, never transmitting frames. Aside from 4540c42a7b7eSSam Leffler * reducing interrupts this also permits more concurrency. 4541c42a7b7eSSam Leffler * The only potential downside is if the tx queue backs 4542c42a7b7eSSam Leffler * up in which case the top half of the kernel may backup 4543c42a7b7eSSam Leffler * due to a lack of tx descriptors. 4544c42a7b7eSSam Leffler */ 4545bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 4546c42a7b7eSSam Leffler qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4547c42a7b7eSSam Leffler if (qnum == -1) { 4548c42a7b7eSSam Leffler /* 4549c42a7b7eSSam Leffler * NB: don't print a message, this happens 4550a614e076SSam Leffler * normally on parts with too few tx queues 4551c42a7b7eSSam Leffler */ 4552c42a7b7eSSam Leffler return NULL; 4553c42a7b7eSSam Leffler } 4554c42a7b7eSSam Leffler if (qnum >= N(sc->sc_txq)) { 45556891c875SPeter Wemm device_printf(sc->sc_dev, 45566891c875SPeter Wemm "hal qnum %u out of range, max %zu!\n", 4557c42a7b7eSSam Leffler qnum, N(sc->sc_txq)); 4558c42a7b7eSSam Leffler ath_hal_releasetxqueue(ah, qnum); 4559c42a7b7eSSam Leffler return NULL; 4560c42a7b7eSSam Leffler } 4561c42a7b7eSSam Leffler if (!ATH_TXQ_SETUP(sc, qnum)) { 4562622b3fd2SSam Leffler ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4563c42a7b7eSSam Leffler sc->sc_txqsetup |= 1<<qnum; 4564c42a7b7eSSam Leffler } 4565c42a7b7eSSam Leffler return &sc->sc_txq[qnum]; 4566c42a7b7eSSam Leffler #undef N 4567c42a7b7eSSam Leffler } 4568c42a7b7eSSam Leffler 4569c42a7b7eSSam Leffler /* 4570c42a7b7eSSam Leffler * Setup a hardware data transmit queue for the specified 4571c42a7b7eSSam Leffler * access control. The hal may not support all requested 4572c42a7b7eSSam Leffler * queues in which case it will return a reference to a 4573c42a7b7eSSam Leffler * previously setup queue. We record the mapping from ac's 4574c42a7b7eSSam Leffler * to h/w queues for use by ath_tx_start and also track 4575c42a7b7eSSam Leffler * the set of h/w queues being used to optimize work in the 4576c42a7b7eSSam Leffler * transmit interrupt handler and related routines. 4577c42a7b7eSSam Leffler */ 4578c42a7b7eSSam Leffler static int 4579c42a7b7eSSam Leffler ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4580c42a7b7eSSam Leffler { 4581c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4582c42a7b7eSSam Leffler struct ath_txq *txq; 4583c42a7b7eSSam Leffler 4584c42a7b7eSSam Leffler if (ac >= N(sc->sc_ac2q)) { 45856891c875SPeter Wemm device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4586c42a7b7eSSam Leffler ac, N(sc->sc_ac2q)); 4587c42a7b7eSSam Leffler return 0; 4588c42a7b7eSSam Leffler } 4589c42a7b7eSSam Leffler txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4590c42a7b7eSSam Leffler if (txq != NULL) { 4591339ccfb3SSam Leffler txq->axq_ac = ac; 4592c42a7b7eSSam Leffler sc->sc_ac2q[ac] = txq; 4593c42a7b7eSSam Leffler return 1; 4594c42a7b7eSSam Leffler } else 4595c42a7b7eSSam Leffler return 0; 4596c42a7b7eSSam Leffler #undef N 4597c42a7b7eSSam Leffler } 4598c42a7b7eSSam Leffler 4599c42a7b7eSSam Leffler /* 4600c42a7b7eSSam Leffler * Update WME parameters for a transmit queue. 
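 *
 * net80211 supplies cwmin/cwmax as exponents and the TXOP limit
 * in 32us units; the macros below convert them for the HAL, e.g.
 *
 *	cwmin = ATH_EXPONENT_TO_VALUE(4) = (1 << 4) - 1 = 15
 *	burst = ATH_TXOP_TO_US(94)       = 94 << 5      = 3008 us
 *
 * (example values only).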
4601c42a7b7eSSam Leffler */ 4602c42a7b7eSSam Leffler static int 4603c42a7b7eSSam Leffler ath_txq_update(struct ath_softc *sc, int ac) 4604c42a7b7eSSam Leffler { 4605c42a7b7eSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4606c42a7b7eSSam Leffler #define ATH_TXOP_TO_US(v) (v<<5) 4607b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 4608b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 4609c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[ac]; 4610c42a7b7eSSam Leffler struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4611c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4612c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4613c42a7b7eSSam Leffler 4614c42a7b7eSSam Leffler ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4615584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 461610ad9a77SSam Leffler if (sc->sc_tdma) { 461710ad9a77SSam Leffler /* 461810ad9a77SSam Leffler * AIFS is zero so there's no pre-transmit wait. The 461910ad9a77SSam Leffler * burst time defines the slot duration and is configured 462009be6601SSam Leffler * through net80211. The QCU is setup to not do post-xmit 462110ad9a77SSam Leffler * back off, lockout all lower-priority QCU's, and fire 462210ad9a77SSam Leffler * off the DMA beacon alert timer which is setup based 462310ad9a77SSam Leffler * on the slot configuration. 462410ad9a77SSam Leffler */ 462510ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 462610ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 462710ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 462810ad9a77SSam Leffler | HAL_TXQ_TXEOLINT_ENABLE 462910ad9a77SSam Leffler | HAL_TXQ_DBA_GATED 463010ad9a77SSam Leffler | HAL_TXQ_BACKOFF_DISABLE 463110ad9a77SSam Leffler | HAL_TXQ_ARB_LOCKOUT_GLOBAL 463210ad9a77SSam Leffler ; 463310ad9a77SSam Leffler qi.tqi_aifs = 0; 463410ad9a77SSam Leffler /* XXX +dbaprep? */ 463510ad9a77SSam Leffler qi.tqi_readyTime = sc->sc_tdmaslotlen; 463610ad9a77SSam Leffler qi.tqi_burstTime = qi.tqi_readyTime; 463710ad9a77SSam Leffler } else { 463810ad9a77SSam Leffler #endif 463916d4de92SAdrian Chadd /* 464016d4de92SAdrian Chadd * XXX shouldn't this just use the default flags 464116d4de92SAdrian Chadd * used in the previous queue setup? 
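 * (For comparison, ath_txq_setup() above requested only the
 * TXEOL and TXDESC interrupts; the non-TDMA path below also
 * enables TXOK, TXERR and TXURN.)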
464216d4de92SAdrian Chadd */ 464310ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 464410ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 464510ad9a77SSam Leffler | HAL_TXQ_TXDESCINT_ENABLE 464610ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 46471f25c0f7SAdrian Chadd | HAL_TXQ_TXEOLINT_ENABLE 464810ad9a77SSam Leffler ; 4649c42a7b7eSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 4650c42a7b7eSSam Leffler qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4651c42a7b7eSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 465210ad9a77SSam Leffler qi.tqi_readyTime = 0; 4653c42a7b7eSSam Leffler qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4654584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 465510ad9a77SSam Leffler } 465610ad9a77SSam Leffler #endif 465710ad9a77SSam Leffler 465810ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 465910ad9a77SSam Leffler "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 466010ad9a77SSam Leffler __func__, txq->axq_qnum, qi.tqi_qflags, 466110ad9a77SSam Leffler qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4662c42a7b7eSSam Leffler 4663c42a7b7eSSam Leffler if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4664b032f27cSSam Leffler if_printf(ifp, "unable to update hardware queue " 4665c42a7b7eSSam Leffler "parameters for %s traffic!\n", 4666c42a7b7eSSam Leffler ieee80211_wme_acnames[ac]); 4667c42a7b7eSSam Leffler return 0; 4668c42a7b7eSSam Leffler } else { 4669c42a7b7eSSam Leffler ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4670c42a7b7eSSam Leffler return 1; 4671c42a7b7eSSam Leffler } 4672c42a7b7eSSam Leffler #undef ATH_TXOP_TO_US 4673c42a7b7eSSam Leffler #undef ATH_EXPONENT_TO_VALUE 4674c42a7b7eSSam Leffler } 4675c42a7b7eSSam Leffler 4676c42a7b7eSSam Leffler /* 4677c42a7b7eSSam Leffler * Callback from the 802.11 layer to update WME parameters. 4678c42a7b7eSSam Leffler */ 4679c42a7b7eSSam Leffler static int 4680c42a7b7eSSam Leffler ath_wme_update(struct ieee80211com *ic) 4681c42a7b7eSSam Leffler { 4682c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 4683c42a7b7eSSam Leffler 4684c42a7b7eSSam Leffler return !ath_txq_update(sc, WME_AC_BE) || 4685c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_BK) || 4686c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VI) || 4687c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4688c42a7b7eSSam Leffler } 4689c42a7b7eSSam Leffler 4690c42a7b7eSSam Leffler /* 4691c42a7b7eSSam Leffler * Reclaim resources for a setup queue. 4692c42a7b7eSSam Leffler */ 4693c42a7b7eSSam Leffler static void 4694c42a7b7eSSam Leffler ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4695c42a7b7eSSam Leffler { 4696c42a7b7eSSam Leffler 4697c42a7b7eSSam Leffler ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4698c42a7b7eSSam Leffler ATH_TXQ_LOCK_DESTROY(txq); 4699c42a7b7eSSam Leffler sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4700c42a7b7eSSam Leffler } 4701c42a7b7eSSam Leffler 4702c42a7b7eSSam Leffler /* 4703c42a7b7eSSam Leffler * Reclaim all tx queue resources. 
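 * Only queues whose bit is set in sc_txqsetup (checked via
 * ATH_TXQ_SETUP()) were ever initialised, so only those are
 * passed to ath_tx_cleanupq().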
4704c42a7b7eSSam Leffler */ 4705c42a7b7eSSam Leffler static void 4706c42a7b7eSSam Leffler ath_tx_cleanup(struct ath_softc *sc) 4707c42a7b7eSSam Leffler { 4708c42a7b7eSSam Leffler int i; 4709c42a7b7eSSam Leffler 4710c42a7b7eSSam Leffler ATH_TXBUF_LOCK_DESTROY(sc); 4711c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4712c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 4713c42a7b7eSSam Leffler ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4714c42a7b7eSSam Leffler } 47155591b213SSam Leffler 471699d258fdSSam Leffler /* 4717ab06fdf2SSam Leffler * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4718ab06fdf2SSam Leffler * using the current rates in sc_rixmap. 47198b5341deSSam Leffler */ 4720b8e788a5SAdrian Chadd int 4721ab06fdf2SSam Leffler ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 47228b5341deSSam Leffler { 4723ab06fdf2SSam Leffler int rix = sc->sc_rixmap[rate]; 4724ab06fdf2SSam Leffler /* NB: return lowest rix for invalid rate */ 4725ab06fdf2SSam Leffler return (rix == 0xff ? 0 : rix); 47268b5341deSSam Leffler } 47278b5341deSSam Leffler 47289352fb7aSAdrian Chadd static void 47299352fb7aSAdrian Chadd ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 47309352fb7aSAdrian Chadd struct ath_buf *bf) 47319352fb7aSAdrian Chadd { 47329352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 47339352fb7aSAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 47349352fb7aSAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 47359352fb7aSAdrian Chadd int sr, lr, pri; 47369352fb7aSAdrian Chadd 47379352fb7aSAdrian Chadd if (ts->ts_status == 0) { 47389352fb7aSAdrian Chadd u_int8_t txant = ts->ts_antenna; 47399352fb7aSAdrian Chadd sc->sc_stats.ast_ant_tx[txant]++; 47409352fb7aSAdrian Chadd sc->sc_ant_tx[txant]++; 47419352fb7aSAdrian Chadd if (ts->ts_finaltsi != 0) 47429352fb7aSAdrian Chadd sc->sc_stats.ast_tx_altrate++; 47439352fb7aSAdrian Chadd pri = M_WME_GETAC(bf->bf_m); 47449352fb7aSAdrian Chadd if (pri >= WME_AC_VO) 47459352fb7aSAdrian Chadd ic->ic_wme.wme_hipri_traffic++; 47469352fb7aSAdrian Chadd if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) 47479352fb7aSAdrian Chadd ni->ni_inact = ni->ni_inact_reload; 47489352fb7aSAdrian Chadd } else { 47499352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XRETRY) 47509352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xretries++; 47519352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FIFO) 47529352fb7aSAdrian Chadd sc->sc_stats.ast_tx_fifoerr++; 47539352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FILT) 47549352fb7aSAdrian Chadd sc->sc_stats.ast_tx_filtered++; 47559352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XTXOP) 47569352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xtxop++; 47579352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 47589352fb7aSAdrian Chadd sc->sc_stats.ast_tx_timerexpired++; 47599352fb7aSAdrian Chadd 47609352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 47619352fb7aSAdrian Chadd sc->sc_stats.ast_tx_data_underrun++; 47629352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 47639352fb7aSAdrian Chadd sc->sc_stats.ast_tx_delim_underrun++; 47649352fb7aSAdrian Chadd 47659352fb7aSAdrian Chadd if (bf->bf_m->m_flags & M_FF) 47669352fb7aSAdrian Chadd sc->sc_stats.ast_ff_txerr++; 47679352fb7aSAdrian Chadd } 47689352fb7aSAdrian Chadd /* XXX when is this valid? 
*/ 47699352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 47709352fb7aSAdrian Chadd sc->sc_stats.ast_tx_desccfgerr++; 47719352fb7aSAdrian Chadd 47729352fb7aSAdrian Chadd sr = ts->ts_shortretry; 47739352fb7aSAdrian Chadd lr = ts->ts_longretry; 47749352fb7aSAdrian Chadd sc->sc_stats.ast_tx_shortretry += sr; 47759352fb7aSAdrian Chadd sc->sc_stats.ast_tx_longretry += lr; 47769352fb7aSAdrian Chadd 47779352fb7aSAdrian Chadd } 47789352fb7aSAdrian Chadd 47799352fb7aSAdrian Chadd /* 47809352fb7aSAdrian Chadd * The default completion. If fail is 1, this means 47819352fb7aSAdrian Chadd * "please don't retry the frame, and just return -1 status 47829352fb7aSAdrian Chadd * to the net80211 stack. 47839352fb7aSAdrian Chadd */ 47849352fb7aSAdrian Chadd void 47859352fb7aSAdrian Chadd ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 47869352fb7aSAdrian Chadd { 47879352fb7aSAdrian Chadd struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 47889352fb7aSAdrian Chadd int st; 47899352fb7aSAdrian Chadd 47909352fb7aSAdrian Chadd if (fail == 1) 47919352fb7aSAdrian Chadd st = -1; 47929352fb7aSAdrian Chadd else 47939352fb7aSAdrian Chadd st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ? 47949352fb7aSAdrian Chadd ts->ts_status : HAL_TXERR_XRETRY; 47959352fb7aSAdrian Chadd 47969352fb7aSAdrian Chadd if (bf->bf_state.bfs_dobaw) 47979352fb7aSAdrian Chadd device_printf(sc->sc_dev, 47989352fb7aSAdrian Chadd "%s: dobaw should've been cleared!\n", __func__); 47999352fb7aSAdrian Chadd if (bf->bf_next != NULL) 48009352fb7aSAdrian Chadd device_printf(sc->sc_dev, 48019352fb7aSAdrian Chadd "%s: bf_next not NULL!\n", __func__); 48029352fb7aSAdrian Chadd 48039352fb7aSAdrian Chadd /* 48049352fb7aSAdrian Chadd * Do any tx complete callback. Note this must 48059352fb7aSAdrian Chadd * be done before releasing the node reference. 48069352fb7aSAdrian Chadd * This will free the mbuf, release the net80211 48079352fb7aSAdrian Chadd * node and recycle the ath_buf. 48089352fb7aSAdrian Chadd */ 48099352fb7aSAdrian Chadd ath_tx_freebuf(sc, bf, st); 48109352fb7aSAdrian Chadd } 48119352fb7aSAdrian Chadd 48129352fb7aSAdrian Chadd /* 4813eb6f0de0SAdrian Chadd * Update rate control with the given completion status. 4814eb6f0de0SAdrian Chadd */ 4815eb6f0de0SAdrian Chadd void 4816eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4817eb6f0de0SAdrian Chadd struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4818eb6f0de0SAdrian Chadd int nframes, int nbad) 4819eb6f0de0SAdrian Chadd { 4820eb6f0de0SAdrian Chadd struct ath_node *an; 4821eb6f0de0SAdrian Chadd 4822eb6f0de0SAdrian Chadd /* Only for unicast frames */ 4823eb6f0de0SAdrian Chadd if (ni == NULL) 4824eb6f0de0SAdrian Chadd return; 4825eb6f0de0SAdrian Chadd 4826eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 4827eb6f0de0SAdrian Chadd 4828eb6f0de0SAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4829eb6f0de0SAdrian Chadd ATH_NODE_LOCK(an); 4830eb6f0de0SAdrian Chadd ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4831eb6f0de0SAdrian Chadd ATH_NODE_UNLOCK(an); 4832eb6f0de0SAdrian Chadd } 4833eb6f0de0SAdrian Chadd } 4834eb6f0de0SAdrian Chadd 4835eb6f0de0SAdrian Chadd /* 48369352fb7aSAdrian Chadd * Update the busy status of the last frame on the free list. 
48379352fb7aSAdrian Chadd * When doing TDMA, the busy flag tracks whether the hardware 48389352fb7aSAdrian Chadd * currently points to this buffer or not, and thus gated DMA 48399352fb7aSAdrian Chadd * may restart by re-reading the last descriptor in this 48409352fb7aSAdrian Chadd * buffer. 48419352fb7aSAdrian Chadd * 48429352fb7aSAdrian Chadd * This should be called in the completion function once one 48439352fb7aSAdrian Chadd * of the buffers has been used. 48449352fb7aSAdrian Chadd */ 48459352fb7aSAdrian Chadd static void 48469352fb7aSAdrian Chadd ath_tx_update_busy(struct ath_softc *sc) 48479352fb7aSAdrian Chadd { 48489352fb7aSAdrian Chadd struct ath_buf *last; 48499352fb7aSAdrian Chadd 48509352fb7aSAdrian Chadd /* 48519352fb7aSAdrian Chadd * Since the last frame may still be marked 48529352fb7aSAdrian Chadd * as ATH_BUF_BUSY, unmark it here before 48539352fb7aSAdrian Chadd * finishing the frame processing. 48549352fb7aSAdrian Chadd * Since we've completed a frame (aggregate 48559352fb7aSAdrian Chadd * or otherwise), the hardware has moved on 48569352fb7aSAdrian Chadd * and is no longer referencing the previous 48579352fb7aSAdrian Chadd * descriptor. 48589352fb7aSAdrian Chadd */ 48599352fb7aSAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 48609352fb7aSAdrian Chadd last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 48619352fb7aSAdrian Chadd if (last != NULL) 48629352fb7aSAdrian Chadd last->bf_flags &= ~ATH_BUF_BUSY; 48639352fb7aSAdrian Chadd } 48649352fb7aSAdrian Chadd 48659352fb7aSAdrian Chadd 486668e8e04eSSam Leffler /* 4867c42a7b7eSSam Leffler * Process completed xmit descriptors from the specified queue. 4868eb6f0de0SAdrian Chadd * Kick the packet scheduler if needed. This can occur from this 4869eb6f0de0SAdrian Chadd * particular task. 4870c42a7b7eSSam Leffler */ 4871d7736e13SSam Leffler static int 487296ff485dSAdrian Chadd ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 48735591b213SSam Leffler { 48745591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 48759352fb7aSAdrian Chadd struct ath_buf *bf; 48766edf1dc7SAdrian Chadd struct ath_desc *ds; 487765f9edeeSSam Leffler struct ath_tx_status *ts; 48785591b213SSam Leffler struct ieee80211_node *ni; 4879eb6f0de0SAdrian Chadd struct ath_node *an; 48809352fb7aSAdrian Chadd int nacked; 48815591b213SSam Leffler HAL_STATUS status; 48825591b213SSam Leffler 4883c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4884c42a7b7eSSam Leffler __func__, txq->axq_qnum, 4885c42a7b7eSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4886c42a7b7eSSam Leffler txq->axq_link); 4887d7736e13SSam Leffler nacked = 0; 48885591b213SSam Leffler for (;;) { 4889c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 4890c42a7b7eSSam Leffler txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 48916b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 48925591b213SSam Leffler if (bf == NULL) { 4893c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 48945591b213SSam Leffler break; 48955591b213SSam Leffler } 48966edf1dc7SAdrian Chadd ds = bf->bf_lastds; /* XXX must be setup correctly! 
*/ 489765f9edeeSSam Leffler ts = &bf->bf_status.ds_txstat; 489865f9edeeSSam Leffler status = ath_hal_txprocdesc(ah, ds, ts); 4899a585a9a1SSam Leffler #ifdef ATH_DEBUG 4900c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 49016902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 49026902009eSSam Leffler status == HAL_OK); 49035591b213SSam Leffler #endif 49045591b213SSam Leffler if (status == HAL_EINPROGRESS) { 4905c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 49065591b213SSam Leffler break; 49075591b213SSam Leffler } 49086b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 4909584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 491010ad9a77SSam Leffler if (txq->axq_depth > 0) { 491110ad9a77SSam Leffler /* 491210ad9a77SSam Leffler * More frames follow. Mark the buffer busy 491310ad9a77SSam Leffler * so it's not re-used while the hardware may 491410ad9a77SSam Leffler * still re-read the link field in the descriptor. 49156edf1dc7SAdrian Chadd * 49166edf1dc7SAdrian Chadd * Use the last buffer in an aggregate as that 49176edf1dc7SAdrian Chadd * is where the hardware may be - intermediate 49186edf1dc7SAdrian Chadd * descriptors won't be "busy". 491910ad9a77SSam Leffler */ 49206edf1dc7SAdrian Chadd bf->bf_last->bf_flags |= ATH_BUF_BUSY; 492110ad9a77SSam Leffler } else 492210ad9a77SSam Leffler #else 4923ebecf802SSam Leffler if (txq->axq_depth == 0) 492410ad9a77SSam Leffler #endif 49251539af1eSSam Leffler txq->axq_link = NULL; 49266edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 49276edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 49285591b213SSam Leffler 49295591b213SSam Leffler ni = bf->bf_node; 4930c42a7b7eSSam Leffler /* 49319352fb7aSAdrian Chadd * If unicast frame was ack'd update RSSI, 493284784be1SSam Leffler * including the last rx time used to 493384784be1SSam Leffler * workaround phantom bmiss interrupts. 4934d7736e13SSam Leffler */ 49359352fb7aSAdrian Chadd if (ni != NULL && ts->ts_status == 0 && 49369352fb7aSAdrian Chadd ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) { 4937d7736e13SSam Leffler nacked++; 493884784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 493984784be1SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 494084784be1SSam Leffler ts->ts_rssi); 494184784be1SSam Leffler } 49429352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 49439352fb7aSAdrian Chadd 49449352fb7aSAdrian Chadd /* If unicast frame, update general statistics */ 49459352fb7aSAdrian Chadd if (ni != NULL) { 4946eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 49479352fb7aSAdrian Chadd /* update statistics */ 49489352fb7aSAdrian Chadd ath_tx_update_stats(sc, ts, bf); 4949d7736e13SSam Leffler } 49509352fb7aSAdrian Chadd 49510a915fadSSam Leffler /* 49529352fb7aSAdrian Chadd * Call the completion handler. 49539352fb7aSAdrian Chadd * The completion handler is responsible for 49549352fb7aSAdrian Chadd * calling the rate control code. 49559352fb7aSAdrian Chadd * 49569352fb7aSAdrian Chadd * Frames with no completion handler get the 49579352fb7aSAdrian Chadd * rate control code called here. 495868e8e04eSSam Leffler */ 49599352fb7aSAdrian Chadd if (bf->bf_comp == NULL) { 49609352fb7aSAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 49619352fb7aSAdrian Chadd (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) { 49629352fb7aSAdrian Chadd /* 49639352fb7aSAdrian Chadd * XXX assume this isn't an aggregate 49649352fb7aSAdrian Chadd * frame. 
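 * For this path nframes is passed as 1 and nbad is 0 on success
 * or 1 on error (see the call below); frames that carry their
 * own bf_comp handler are expected to do their own rate control
 * accounting.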
49659352fb7aSAdrian Chadd */ 4966eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(sc, ni, 4967eb6f0de0SAdrian Chadd bf->bf_state.bfs_rc, ts, 4968eb6f0de0SAdrian Chadd bf->bf_state.bfs_pktlen, 1, 4969eb6f0de0SAdrian Chadd (ts->ts_status == 0 ? 0 : 1)); 49705591b213SSam Leffler } 49719352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 0); 49729352fb7aSAdrian Chadd } else 49739352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 0); 49745591b213SSam Leffler } 4975339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 497668e8e04eSSam Leffler /* 497768e8e04eSSam Leffler * Flush fast-frame staging queue when traffic slows. 497868e8e04eSSam Leffler */ 497968e8e04eSSam Leffler if (txq->axq_depth <= 1) 498004f19fd6SSam Leffler ieee80211_ff_flush(ic, txq->axq_ac); 4981339ccfb3SSam Leffler #endif 4982eb6f0de0SAdrian Chadd 4983eb6f0de0SAdrian Chadd /* Kick the TXQ scheduler */ 4984eb6f0de0SAdrian Chadd if (dosched) { 4985eb6f0de0SAdrian Chadd ATH_TXQ_LOCK(txq); 4986eb6f0de0SAdrian Chadd ath_txq_sched(sc, txq); 4987eb6f0de0SAdrian Chadd ATH_TXQ_UNLOCK(txq); 4988eb6f0de0SAdrian Chadd } 4989eb6f0de0SAdrian Chadd 4990d7736e13SSam Leffler return nacked; 4991d7736e13SSam Leffler } 4992d7736e13SSam Leffler 49938f939e79SAdrian Chadd #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4994c42a7b7eSSam Leffler 4995c42a7b7eSSam Leffler /* 4996c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 4997c42a7b7eSSam Leffler * for a single hardware transmit queue (e.g. 5210 and 5211). 4998c42a7b7eSSam Leffler */ 4999c42a7b7eSSam Leffler static void 5000c42a7b7eSSam Leffler ath_tx_proc_q0(void *arg, int npending) 5001c42a7b7eSSam Leffler { 5002c42a7b7eSSam Leffler struct ath_softc *sc = arg; 5003fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 50048f939e79SAdrian Chadd uint32_t txqs; 5005c42a7b7eSSam Leffler 5006ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5007ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50088f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50098f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5010ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 50118f939e79SAdrian Chadd 501296ff485dSAdrian Chadd if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 50138f939e79SAdrian Chadd /* XXX why is lastrx updated in tx code? */ 5014d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 50158f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 501696ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 5017e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 501813f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5019e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 50202e986da5SSam Leffler sc->sc_wd_timer = 0; 50215591b213SSam Leffler 50223e50ec2cSSam Leffler if (sc->sc_softled) 502346d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 50243e50ec2cSSam Leffler 5025ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5026ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5027ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5028ef27340cSAdrian Chadd 50295591b213SSam Leffler ath_start(ifp); 50305591b213SSam Leffler } 50315591b213SSam Leffler 50325591b213SSam Leffler /* 5033c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 5034c42a7b7eSSam Leffler * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
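 *
 * As in ath_tx_proc_q0() and ath_tx_proc(), sc_txq_active is
 * snapshotted and cleared under ATH_PCU_LOCK and individual
 * queues are then tested with TXQACTIVE(txqs, qnum), i.e.
 * (txqs & (1 << qnum)).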
50355591b213SSam Leffler */ 50365591b213SSam Leffler static void 5037c42a7b7eSSam Leffler ath_tx_proc_q0123(void *arg, int npending) 5038c42a7b7eSSam Leffler { 5039c42a7b7eSSam Leffler struct ath_softc *sc = arg; 5040fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 5041d7736e13SSam Leffler int nacked; 50428f939e79SAdrian Chadd uint32_t txqs; 50438f939e79SAdrian Chadd 5044ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5045ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50468f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50478f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5048ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5049c42a7b7eSSam Leffler 5050c42a7b7eSSam Leffler /* 5051c42a7b7eSSam Leffler * Process each active queue. 5052c42a7b7eSSam Leffler */ 5053d7736e13SSam Leffler nacked = 0; 50548f939e79SAdrian Chadd if (TXQACTIVE(txqs, 0)) 505596ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 50568f939e79SAdrian Chadd if (TXQACTIVE(txqs, 1)) 505796ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 50588f939e79SAdrian Chadd if (TXQACTIVE(txqs, 2)) 505996ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 50608f939e79SAdrian Chadd if (TXQACTIVE(txqs, 3)) 506196ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 50628f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 506396ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 5064d7736e13SSam Leffler if (nacked) 5065d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5066c42a7b7eSSam Leffler 5067e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 506813f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5069e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 50702e986da5SSam Leffler sc->sc_wd_timer = 0; 5071c42a7b7eSSam Leffler 50723e50ec2cSSam Leffler if (sc->sc_softled) 507346d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 50743e50ec2cSSam Leffler 5075ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5076ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5077ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5078ef27340cSAdrian Chadd 5079c42a7b7eSSam Leffler ath_start(ifp); 5080c42a7b7eSSam Leffler } 5081c42a7b7eSSam Leffler 5082c42a7b7eSSam Leffler /* 5083c42a7b7eSSam Leffler * Deferred processing of transmit interrupt. 5084c42a7b7eSSam Leffler */ 5085c42a7b7eSSam Leffler static void 5086c42a7b7eSSam Leffler ath_tx_proc(void *arg, int npending) 5087c42a7b7eSSam Leffler { 5088c42a7b7eSSam Leffler struct ath_softc *sc = arg; 5089fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 5090d7736e13SSam Leffler int i, nacked; 50918f939e79SAdrian Chadd uint32_t txqs; 50928f939e79SAdrian Chadd 5093ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5094ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50958f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50968f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5097ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5098c42a7b7eSSam Leffler 5099c42a7b7eSSam Leffler /* 5100c42a7b7eSSam Leffler * Process each active queue. 5101c42a7b7eSSam Leffler */ 5102d7736e13SSam Leffler nacked = 0; 5103c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 51048f939e79SAdrian Chadd if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 510596ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 5106d7736e13SSam Leffler if (nacked) 5107d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5108c42a7b7eSSam Leffler 5109ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? 
*/ 5110e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 511113f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5112e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 51132e986da5SSam Leffler sc->sc_wd_timer = 0; 5114c42a7b7eSSam Leffler 51153e50ec2cSSam Leffler if (sc->sc_softled) 511646d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 51173e50ec2cSSam Leffler 5118ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5119ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5120ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5121ef27340cSAdrian Chadd 5122c42a7b7eSSam Leffler ath_start(ifp); 5123c42a7b7eSSam Leffler } 512416d4de92SAdrian Chadd #undef TXQACTIVE 5125c42a7b7eSSam Leffler 51269352fb7aSAdrian Chadd /* 51279352fb7aSAdrian Chadd * Return a buffer to the pool and update the 'busy' flag on the 51289352fb7aSAdrian Chadd * previous 'tail' entry. 51299352fb7aSAdrian Chadd * 51309352fb7aSAdrian Chadd * This _must_ only be called when the buffer is involved in a completed 51319352fb7aSAdrian Chadd * TX. The logic is that if it was part of an active TX, the previous 51329352fb7aSAdrian Chadd * buffer on the list is now not involved in a halted TX DMA queue, waiting 51339352fb7aSAdrian Chadd * for restart (eg for TDMA.) 51349352fb7aSAdrian Chadd * 51359352fb7aSAdrian Chadd * The caller must free the mbuf and recycle the node reference. 51369352fb7aSAdrian Chadd */ 51379352fb7aSAdrian Chadd void 51389352fb7aSAdrian Chadd ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 51399352fb7aSAdrian Chadd { 51409352fb7aSAdrian Chadd bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 51419352fb7aSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 51429352fb7aSAdrian Chadd 51439352fb7aSAdrian Chadd KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 51449352fb7aSAdrian Chadd KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 51459352fb7aSAdrian Chadd 51469352fb7aSAdrian Chadd ATH_TXBUF_LOCK(sc); 51479352fb7aSAdrian Chadd ath_tx_update_busy(sc); 51489352fb7aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 51499352fb7aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 51509352fb7aSAdrian Chadd } 51519352fb7aSAdrian Chadd 51529352fb7aSAdrian Chadd /* 51539352fb7aSAdrian Chadd * This is currently used by ath_tx_draintxq() and 51549352fb7aSAdrian Chadd * ath_tx_tid_free_pkts(). 51559352fb7aSAdrian Chadd * 51569352fb7aSAdrian Chadd * It recycles a single ath_buf. 51579352fb7aSAdrian Chadd */ 51589352fb7aSAdrian Chadd void 51599352fb7aSAdrian Chadd ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 51609352fb7aSAdrian Chadd { 51619352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 51629352fb7aSAdrian Chadd struct mbuf *m0 = bf->bf_m; 51639352fb7aSAdrian Chadd 51649352fb7aSAdrian Chadd bf->bf_node = NULL; 51659352fb7aSAdrian Chadd bf->bf_m = NULL; 51669352fb7aSAdrian Chadd 51679352fb7aSAdrian Chadd /* Free the buffer, it's not needed any longer */ 51689352fb7aSAdrian Chadd ath_freebuf(sc, bf); 51699352fb7aSAdrian Chadd 51709352fb7aSAdrian Chadd if (ni != NULL) { 51719352fb7aSAdrian Chadd /* 51729352fb7aSAdrian Chadd * Do any callback and reclaim the node reference. 
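 * If the mbuf has M_TXCB set, ieee80211_process_callback() is
 * invoked with the supplied status before ieee80211_free_node()
 * drops the node reference and the mbuf is freed.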
51739352fb7aSAdrian Chadd */ 51749352fb7aSAdrian Chadd if (m0->m_flags & M_TXCB) 51759352fb7aSAdrian Chadd ieee80211_process_callback(ni, m0, status); 51769352fb7aSAdrian Chadd ieee80211_free_node(ni); 51779352fb7aSAdrian Chadd } 51789352fb7aSAdrian Chadd m_freem(m0); 51799352fb7aSAdrian Chadd 51809352fb7aSAdrian Chadd /* 51819352fb7aSAdrian Chadd * XXX the buffer used to be freed -after-, but the DMA map was 51829352fb7aSAdrian Chadd * freed where ath_freebuf() now is. I've no idea what this 51839352fb7aSAdrian Chadd * will do. 51849352fb7aSAdrian Chadd */ 51859352fb7aSAdrian Chadd } 51869352fb7aSAdrian Chadd 51879352fb7aSAdrian Chadd void 5188c42a7b7eSSam Leffler ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 51895591b213SSam Leffler { 5190a585a9a1SSam Leffler #ifdef ATH_DEBUG 51915591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5192d2f6ed15SSam Leffler #endif 51935591b213SSam Leffler struct ath_buf *bf; 51947a4c5ed9SSam Leffler u_int ix; 51955591b213SSam Leffler 5196c42a7b7eSSam Leffler /* 5197c42a7b7eSSam Leffler * NB: this assumes output has been stopped and 51985d61b5e8SSam Leffler * we do not need to block ath_tx_proc 5199c42a7b7eSSam Leffler */ 520010ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 52016b349e5aSAdrian Chadd bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 520210ad9a77SSam Leffler if (bf != NULL) 520310ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 520410ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 52059352fb7aSAdrian Chadd 52067a4c5ed9SSam Leffler for (ix = 0;; ix++) { 5207c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 52086b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 52095591b213SSam Leffler if (bf == NULL) { 5210ebecf802SSam Leffler txq->axq_link = NULL; 5211c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 52125591b213SSam Leffler break; 52135591b213SSam Leffler } 52146b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 52156edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 52166edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 5217a585a9a1SSam Leffler #ifdef ATH_DEBUG 52184a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 5219b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5220b032f27cSSam Leffler 52216902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 52226edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 522365f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5224e40b6ab1SSam Leffler ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 52254a3ac3fcSSam Leffler bf->bf_m->m_len, 0, -1); 52264a3ac3fcSSam Leffler } 5227a585a9a1SSam Leffler #endif /* ATH_DEBUG */ 522823428eafSSam Leffler /* 52299352fb7aSAdrian Chadd * Since we're now doing magic in the completion 52309352fb7aSAdrian Chadd * functions, we -must- call it for aggregation 52319352fb7aSAdrian Chadd * destinations or BAW tracking will get upset. 523223428eafSSam Leffler */ 52339352fb7aSAdrian Chadd /* 52349352fb7aSAdrian Chadd * Clear ATH_BUF_BUSY; the completion handler 52359352fb7aSAdrian Chadd * will free the buffer. 52369352fb7aSAdrian Chadd */ 52379352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 523810ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 52399352fb7aSAdrian Chadd if (bf->bf_comp) 52409352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 1); 52419352fb7aSAdrian Chadd else 52429352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 1); 52435591b213SSam Leffler } 52449352fb7aSAdrian Chadd 5245eb6f0de0SAdrian Chadd /* 5246eb6f0de0SAdrian Chadd * Drain software queued frames which are on 5247eb6f0de0SAdrian Chadd * active TIDs. 
5248eb6f0de0SAdrian Chadd */ 5249eb6f0de0SAdrian Chadd ath_tx_txq_drain(sc, txq); 5250c42a7b7eSSam Leffler } 5251c42a7b7eSSam Leffler 5252c42a7b7eSSam Leffler static void 5253c42a7b7eSSam Leffler ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5254c42a7b7eSSam Leffler { 5255c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5256c42a7b7eSSam Leffler 5257c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5258c42a7b7eSSam Leffler __func__, txq->axq_qnum, 52596891c875SPeter Wemm (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 52606891c875SPeter Wemm txq->axq_link); 52614a3ac3fcSSam Leffler (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5262c42a7b7eSSam Leffler } 5263c42a7b7eSSam Leffler 52642d433424SAdrian Chadd static int 52652d433424SAdrian Chadd ath_stoptxdma(struct ath_softc *sc) 5266c42a7b7eSSam Leffler { 5267c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5268c42a7b7eSSam Leffler int i; 5269c42a7b7eSSam Leffler 5270c42a7b7eSSam Leffler /* XXX return value */ 52712d433424SAdrian Chadd if (sc->sc_invalid) 52722d433424SAdrian Chadd return 0; 52732d433424SAdrian Chadd 5274c42a7b7eSSam Leffler if (!sc->sc_invalid) { 5275c42a7b7eSSam Leffler /* don't touch the hardware if marked invalid */ 52764a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 52774a3ac3fcSSam Leffler __func__, sc->sc_bhalq, 52784a3ac3fcSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 52794a3ac3fcSSam Leffler NULL); 5280c42a7b7eSSam Leffler (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5281c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5282c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 5283c42a7b7eSSam Leffler ath_tx_stopdma(sc, &sc->sc_txq[i]); 5284c42a7b7eSSam Leffler } 52852d433424SAdrian Chadd 52862d433424SAdrian Chadd return 1; 52872d433424SAdrian Chadd } 52882d433424SAdrian Chadd 52892d433424SAdrian Chadd /* 52902d433424SAdrian Chadd * Drain the transmit queues and reclaim resources. 52912d433424SAdrian Chadd */ 52922d433424SAdrian Chadd static void 52932d433424SAdrian Chadd ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 52942d433424SAdrian Chadd { 52952d433424SAdrian Chadd #ifdef ATH_DEBUG 52962d433424SAdrian Chadd struct ath_hal *ah = sc->sc_ah; 52972d433424SAdrian Chadd #endif 52982d433424SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 52992d433424SAdrian Chadd int i; 53002d433424SAdrian Chadd 53012d433424SAdrian Chadd (void) ath_stoptxdma(sc); 53022d433424SAdrian Chadd 5303ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5304ef27340cSAdrian Chadd /* 5305ef27340cSAdrian Chadd * XXX TODO: should we just handle the completed TX frames 5306ef27340cSAdrian Chadd * here, whether or not the reset is a full one or not? 
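 * As currently written, ATH_RESET_NOLOSS completes frames in
 * place via ath_tx_processq() (with rescheduling disabled),
 * while any other reset type flushes them through
 * ath_tx_draintxq(), where the completion handlers run with
 * fail set.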
5307ef27340cSAdrian Chadd */ 5308ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 5309ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) 5310ef27340cSAdrian Chadd ath_tx_processq(sc, &sc->sc_txq[i], 0); 5311ef27340cSAdrian Chadd else 5312c42a7b7eSSam Leffler ath_tx_draintxq(sc, &sc->sc_txq[i]); 5313ef27340cSAdrian Chadd } 5314ef27340cSAdrian Chadd } 53154a3ac3fcSSam Leffler #ifdef ATH_DEBUG 53164a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 53176b349e5aSAdrian Chadd struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 53184a3ac3fcSSam Leffler if (bf != NULL && bf->bf_m != NULL) { 53196902009eSSam Leffler ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 53206edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 532165f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5322e40b6ab1SSam Leffler ieee80211_dump_pkt(ifp->if_l2com, 5323e40b6ab1SSam Leffler mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5324e40b6ab1SSam Leffler 0, -1); 53254a3ac3fcSSam Leffler } 53264a3ac3fcSSam Leffler } 53274a3ac3fcSSam Leffler #endif /* ATH_DEBUG */ 5328e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 532913f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5330e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 53312e986da5SSam Leffler sc->sc_wd_timer = 0; 53325591b213SSam Leffler } 53335591b213SSam Leffler 53345591b213SSam Leffler /* 53355591b213SSam Leffler * Disable the receive h/w in preparation for a reset. 53365591b213SSam Leffler */ 53375591b213SSam Leffler static void 53389a842e8bSAdrian Chadd ath_stoprecv(struct ath_softc *sc, int dodelay) 53395591b213SSam Leffler { 53408cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 5341c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 5342c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 53435591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 53445591b213SSam Leffler 53455591b213SSam Leffler ath_hal_stoppcurecv(ah); /* disable PCU */ 53465591b213SSam Leffler ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 53475591b213SSam Leffler ath_hal_stopdmarecv(ah); /* disable DMA engine */ 53489a842e8bSAdrian Chadd if (dodelay) 5349c42a7b7eSSam Leffler DELAY(3000); /* 3ms is long enough for 1 frame */ 5350a585a9a1SSam Leffler #ifdef ATH_DEBUG 5351c42a7b7eSSam Leffler if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 53525591b213SSam Leffler struct ath_buf *bf; 53537a4c5ed9SSam Leffler u_int ix; 53545591b213SSam Leffler 5355e325e530SSam Leffler printf("%s: rx queue %p, link %p\n", __func__, 535630310634SPeter Wemm (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 53577a4c5ed9SSam Leffler ix = 0; 53586b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 53598cec0ab9SSam Leffler struct ath_desc *ds = bf->bf_desc; 536065f9edeeSSam Leffler struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5361c42a7b7eSSam Leffler HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 536265f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 5363c42a7b7eSSam Leffler if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 53646902009eSSam Leffler ath_printrxbuf(sc, bf, ix, status == HAL_OK); 53657a4c5ed9SSam Leffler ix++; 53665591b213SSam Leffler } 53675591b213SSam Leffler } 53685591b213SSam Leffler #endif 536968e8e04eSSam Leffler if (sc->sc_rxpending != NULL) { 537068e8e04eSSam Leffler m_freem(sc->sc_rxpending); 537168e8e04eSSam Leffler sc->sc_rxpending = NULL; 537268e8e04eSSam Leffler } 53735591b213SSam Leffler sc->sc_rxlink = NULL; /* just in case */ 53748cec0ab9SSam Leffler #undef PA2DESC 
53755591b213SSam Leffler } 53765591b213SSam Leffler 53775591b213SSam Leffler /* 53785591b213SSam Leffler * Enable the receive h/w following a reset. 53795591b213SSam Leffler */ 53805591b213SSam Leffler static int 53815591b213SSam Leffler ath_startrecv(struct ath_softc *sc) 53825591b213SSam Leffler { 53835591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 53845591b213SSam Leffler struct ath_buf *bf; 53855591b213SSam Leffler 53865591b213SSam Leffler sc->sc_rxlink = NULL; 538768e8e04eSSam Leffler sc->sc_rxpending = NULL; 53886b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 53895591b213SSam Leffler int error = ath_rxbuf_init(sc, bf); 53905591b213SSam Leffler if (error != 0) { 5391c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 5392c42a7b7eSSam Leffler "%s: ath_rxbuf_init failed %d\n", 5393c42a7b7eSSam Leffler __func__, error); 53945591b213SSam Leffler return error; 53955591b213SSam Leffler } 53965591b213SSam Leffler } 53975591b213SSam Leffler 53986b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 53995591b213SSam Leffler ath_hal_putrxbuf(ah, bf->bf_daddr); 54005591b213SSam Leffler ath_hal_rxena(ah); /* enable recv descriptors */ 54015591b213SSam Leffler ath_mode_init(sc); /* set filters, etc. */ 54025591b213SSam Leffler ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 54035591b213SSam Leffler return 0; 54045591b213SSam Leffler } 54055591b213SSam Leffler 54065591b213SSam Leffler /* 5407c42a7b7eSSam Leffler * Update internal state after a channel change. 5408c42a7b7eSSam Leffler */ 5409c42a7b7eSSam Leffler static void 5410c42a7b7eSSam Leffler ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5411c42a7b7eSSam Leffler { 5412c42a7b7eSSam Leffler enum ieee80211_phymode mode; 5413c42a7b7eSSam Leffler 5414c42a7b7eSSam Leffler /* 5415c42a7b7eSSam Leffler * Change channels and update the h/w rate map 5416c42a7b7eSSam Leffler * if we're switching; e.g. 11a to 11b/g. 5417c42a7b7eSSam Leffler */ 541868e8e04eSSam Leffler mode = ieee80211_chan2mode(chan); 5419c42a7b7eSSam Leffler if (mode != sc->sc_curmode) 5420c42a7b7eSSam Leffler ath_setcurmode(sc, mode); 542159efa8b5SSam Leffler sc->sc_curchan = chan; 5422c42a7b7eSSam Leffler } 5423c42a7b7eSSam Leffler 5424c42a7b7eSSam Leffler /* 54255591b213SSam Leffler * Set/change channels. If the channel is really being changed, 54264fa8d4efSDaniel Eischen * it's done by resetting the chip. To accomplish this we must 54275591b213SSam Leffler * first cleanup any pending DMA, then restart stuff after a la 54285591b213SSam Leffler * ath_init. 
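 *
 * Roughly: block the taskqueue and grab the reset lock, stop
 * receive, complete pending RX/TX (ath_rx_proc() and
 * ath_draintxq(ATH_RESET_NOLOSS)), flush the remainder, reset
 * the hardware on the new channel, restart receive, restore
 * beacons if they were running, and re-enable interrupts in the
 * finish: path.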
54295591b213SSam Leffler */ 54305591b213SSam Leffler static int 54315591b213SSam Leffler ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 54325591b213SSam Leffler { 5433b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 5434b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 54355591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5436ef27340cSAdrian Chadd int ret = 0; 5437ef27340cSAdrian Chadd 5438ef27340cSAdrian Chadd /* Treat this as an interface reset */ 5439d52f7132SAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 5440d52f7132SAdrian Chadd ATH_UNLOCK_ASSERT(sc); 5441d52f7132SAdrian Chadd 5442d52f7132SAdrian Chadd /* (Try to) stop TX/RX from occurring */ 5443d52f7132SAdrian Chadd taskqueue_block(sc->sc_tq); 5444d52f7132SAdrian Chadd 5445ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5446e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 5447e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 5448ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 5449ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5450ef27340cSAdrian Chadd __func__); 5451ee321975SAdrian Chadd } 5452ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5453c42a7b7eSSam Leffler 545459efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 545559efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 545659efa8b5SSam Leffler chan->ic_freq, chan->ic_flags); 545759efa8b5SSam Leffler if (chan != sc->sc_curchan) { 5458c42a7b7eSSam Leffler HAL_STATUS status; 54595591b213SSam Leffler /* 54605591b213SSam Leffler * To switch channels clear any pending DMA operations; 54615591b213SSam Leffler * wait long enough for the RX fifo to drain, reset the 54625591b213SSam Leffler * hardware at the new frequency, and then re-enable 54635591b213SSam Leffler * the relevant bits of the h/w. 54645591b213SSam Leffler */ 5465ef27340cSAdrian Chadd #if 0 54665591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 5467ef27340cSAdrian Chadd #endif 54689a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* turn off frame recv */ 54699a842e8bSAdrian Chadd /* 54709a842e8bSAdrian Chadd * First, handle completed TX/RX frames. 54719a842e8bSAdrian Chadd */ 54729a842e8bSAdrian Chadd ath_rx_proc(sc, 0); 54739a842e8bSAdrian Chadd ath_draintxq(sc, ATH_RESET_NOLOSS); 54749a842e8bSAdrian Chadd /* 54759a842e8bSAdrian Chadd * Next, flush the non-scheduled frames. 54769a842e8bSAdrian Chadd */ 5477517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 54789a842e8bSAdrian Chadd 547959efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 5480b032f27cSSam Leffler if_printf(ifp, "%s: unable to reset " 548179649302SGavin Atkinson "channel %u (%u MHz, flags 0x%x), hal status %u\n", 548259efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 548359efa8b5SSam Leffler chan->ic_freq, chan->ic_flags, status); 5484ef27340cSAdrian Chadd ret = EIO; 5485ef27340cSAdrian Chadd goto finish; 54865591b213SSam Leffler } 5487c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 5488c42a7b7eSSam Leffler 548948237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 5490398bca2eSAdrian Chadd ath_dfs_radar_enable(sc, chan); 549148237774SAdrian Chadd 54925591b213SSam Leffler /* 54935591b213SSam Leffler * Re-enable rx framework.
54945591b213SSam Leffler */ 54955591b213SSam Leffler if (ath_startrecv(sc) != 0) { 5496b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 5497b032f27cSSam Leffler __func__); 5498ef27340cSAdrian Chadd ret = EIO; 5499ef27340cSAdrian Chadd goto finish; 55005591b213SSam Leffler } 55015591b213SSam Leffler 55025591b213SSam Leffler /* 55035591b213SSam Leffler * Change channels and update the h/w rate map 55045591b213SSam Leffler * if we're switching; e.g. 11a to 11b/g. 55055591b213SSam Leffler */ 5506c42a7b7eSSam Leffler ath_chan_change(sc, chan); 55070a915fadSSam Leffler 55080a915fadSSam Leffler /* 55092fd9aabbSAdrian Chadd * Reset clears the beacon timers; reset them 55102fd9aabbSAdrian Chadd * here if needed. 55112fd9aabbSAdrian Chadd */ 55122fd9aabbSAdrian Chadd if (sc->sc_beacons) { /* restart beacons */ 55132fd9aabbSAdrian Chadd #ifdef IEEE80211_SUPPORT_TDMA 55142fd9aabbSAdrian Chadd if (sc->sc_tdma) 55152fd9aabbSAdrian Chadd ath_tdma_config(sc, NULL); 55162fd9aabbSAdrian Chadd else 55172fd9aabbSAdrian Chadd #endif 55182fd9aabbSAdrian Chadd ath_beacon_config(sc, NULL); 55192fd9aabbSAdrian Chadd } 55202fd9aabbSAdrian Chadd 55212fd9aabbSAdrian Chadd /* 55220a915fadSSam Leffler * Re-enable interrupts. 55230a915fadSSam Leffler */ 5524e78719adSAdrian Chadd #if 0 55250a915fadSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5526ef27340cSAdrian Chadd #endif 55275591b213SSam Leffler } 5528ef27340cSAdrian Chadd 5529ef27340cSAdrian Chadd finish: 5530ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5531ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 5532ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 5533ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 5534ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5535ef27340cSAdrian Chadd 5536e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 5537ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5538e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 5539ef27340cSAdrian Chadd ath_txrx_start(sc); 5540ef27340cSAdrian Chadd /* XXX ath_start? */ 5541ef27340cSAdrian Chadd 5542ef27340cSAdrian Chadd return ret; 55435591b213SSam Leffler } 55445591b213SSam Leffler 55455591b213SSam Leffler /* 55465591b213SSam Leffler * Periodically recalibrate the PHY to account 55475591b213SSam Leffler * for temperature/environment changes. 
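 *
 * Three flavours are scheduled below: the long cal fires when
 * ticks - sc_lastlongcal >= ath_longcalinterval * hz, while the
 * ANI and short cals scale by hz / 1000, which suggests
 * ath_longcalinterval is in seconds and the other two intervals
 * are in milliseconds.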
55485591b213SSam Leffler */ 55495591b213SSam Leffler static void 55505591b213SSam Leffler ath_calibrate(void *arg) 55515591b213SSam Leffler { 55525591b213SSam Leffler struct ath_softc *sc = arg; 55535591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 55542dc7fcc4SSam Leffler struct ifnet *ifp = sc->sc_ifp; 55558d91de92SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 55562dc7fcc4SSam Leffler HAL_BOOL longCal, isCalDone; 5557a108ab63SAdrian Chadd HAL_BOOL aniCal, shortCal = AH_FALSE; 55582dc7fcc4SSam Leffler int nextcal; 55595591b213SSam Leffler 55608d91de92SSam Leffler if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 55618d91de92SSam Leffler goto restart; 55622dc7fcc4SSam Leffler longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5563a108ab63SAdrian Chadd aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5564a108ab63SAdrian Chadd if (sc->sc_doresetcal) 5565a108ab63SAdrian Chadd shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5566a108ab63SAdrian Chadd 5567a108ab63SAdrian Chadd DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5568a108ab63SAdrian Chadd if (aniCal) { 5569a108ab63SAdrian Chadd sc->sc_stats.ast_ani_cal++; 5570a108ab63SAdrian Chadd sc->sc_lastani = ticks; 5571a108ab63SAdrian Chadd ath_hal_ani_poll(ah, sc->sc_curchan); 5572a108ab63SAdrian Chadd } 5573a108ab63SAdrian Chadd 55742dc7fcc4SSam Leffler if (longCal) { 55755591b213SSam Leffler sc->sc_stats.ast_per_cal++; 55768197f57eSAdrian Chadd sc->sc_lastlongcal = ticks; 55775591b213SSam Leffler if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 55785591b213SSam Leffler /* 55795591b213SSam Leffler * Rfgain is out of bounds, reset the chip 55805591b213SSam Leffler * to load new gain values. 55815591b213SSam Leffler */ 5582370572d9SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5583370572d9SSam Leffler "%s: rfgain change\n", __func__); 55845591b213SSam Leffler sc->sc_stats.ast_per_rfgain++; 5585ef27340cSAdrian Chadd sc->sc_resetcal = 0; 5586ef27340cSAdrian Chadd sc->sc_doresetcal = AH_TRUE; 5587d52f7132SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5588d52f7132SAdrian Chadd callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5589ef27340cSAdrian Chadd return; 55905591b213SSam Leffler } 55912dc7fcc4SSam Leffler /* 55922dc7fcc4SSam Leffler * If this long cal is after an idle period, then 55932dc7fcc4SSam Leffler * reset the data collection state so we start fresh. 55942dc7fcc4SSam Leffler */ 55952dc7fcc4SSam Leffler if (sc->sc_resetcal) { 559659efa8b5SSam Leffler (void) ath_hal_calreset(ah, sc->sc_curchan); 55972dc7fcc4SSam Leffler sc->sc_lastcalreset = ticks; 5598a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 55992dc7fcc4SSam Leffler sc->sc_resetcal = 0; 5600a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 56012dc7fcc4SSam Leffler } 56022dc7fcc4SSam Leffler } 5603a108ab63SAdrian Chadd 5604a108ab63SAdrian Chadd /* Only call if we're doing a short/long cal, not for ANI calibration */ 5605a108ab63SAdrian Chadd if (shortCal || longCal) { 560659efa8b5SSam Leffler if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 56072dc7fcc4SSam Leffler if (longCal) { 56082dc7fcc4SSam Leffler /* 56092dc7fcc4SSam Leffler * Calibrate noise floor data again in case of change. 
56102dc7fcc4SSam Leffler */ 56112dc7fcc4SSam Leffler ath_hal_process_noisefloor(ah); 56122dc7fcc4SSam Leffler } 56132dc7fcc4SSam Leffler } else { 5614c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 5615c42a7b7eSSam Leffler "%s: calibration of channel %u failed\n", 561659efa8b5SSam Leffler __func__, sc->sc_curchan->ic_freq); 56175591b213SSam Leffler sc->sc_stats.ast_per_calfail++; 56185591b213SSam Leffler } 5619a108ab63SAdrian Chadd if (shortCal) 5620a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5621a108ab63SAdrian Chadd } 56222dc7fcc4SSam Leffler if (!isCalDone) { 56238d91de92SSam Leffler restart: 56247b0c77ecSSam Leffler /* 56252dc7fcc4SSam Leffler * Use a shorter interval to potentially collect multiple 56262dc7fcc4SSam Leffler * data samples required to complete calibration. Once 56272dc7fcc4SSam Leffler * we're told the work is done we drop back to a longer 56282dc7fcc4SSam Leffler * interval between requests. We're more aggressive doing 56292dc7fcc4SSam Leffler * work when operating as an AP to improve operation right 56302dc7fcc4SSam Leffler * after startup. 56317b0c77ecSSam Leffler */ 5632a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5633a108ab63SAdrian Chadd nextcal = ath_shortcalinterval*hz/1000; 56342dc7fcc4SSam Leffler if (sc->sc_opmode != HAL_M_HOSTAP) 56352dc7fcc4SSam Leffler nextcal *= 10; 5636a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 56372dc7fcc4SSam Leffler } else { 5638a108ab63SAdrian Chadd /* nextcal should be the shortest time for next event */ 56392dc7fcc4SSam Leffler nextcal = ath_longcalinterval*hz; 56402dc7fcc4SSam Leffler if (sc->sc_lastcalreset == 0) 56412dc7fcc4SSam Leffler sc->sc_lastcalreset = sc->sc_lastlongcal; 56422dc7fcc4SSam Leffler else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 56432dc7fcc4SSam Leffler sc->sc_resetcal = 1; /* setup reset next trip */ 5644a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 5645bd5a9920SSam Leffler } 5646a108ab63SAdrian Chadd /* ANI calibration may occur more often than short/long/resetcal */ 5647a108ab63SAdrian Chadd if (ath_anicalinterval > 0) 5648a108ab63SAdrian Chadd nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5649bd5a9920SSam Leffler 56502dc7fcc4SSam Leffler if (nextcal != 0) { 56512dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 56522dc7fcc4SSam Leffler __func__, nextcal, isCalDone ? "" : "!"); 56532dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 56542dc7fcc4SSam Leffler } else { 56552dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 56562dc7fcc4SSam Leffler __func__); 56572dc7fcc4SSam Leffler /* NB: don't rearm timer */ 56582dc7fcc4SSam Leffler } 56595591b213SSam Leffler } 56605591b213SSam Leffler 566168e8e04eSSam Leffler static void 566268e8e04eSSam Leffler ath_scan_start(struct ieee80211com *ic) 566368e8e04eSSam Leffler { 566468e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 566568e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 566668e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 566768e8e04eSSam Leffler u_int32_t rfilt; 566868e8e04eSSam Leffler 566968e8e04eSSam Leffler /* XXX calibration timer? 
*/ 567068e8e04eSSam Leffler 5671c98cefc5SAdrian Chadd ATH_LOCK(sc); 567268e8e04eSSam Leffler sc->sc_scanning = 1; 567368e8e04eSSam Leffler sc->sc_syncbeacon = 0; 567468e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 5675c98cefc5SAdrian Chadd ATH_UNLOCK(sc); 5676c98cefc5SAdrian Chadd 5677c98cefc5SAdrian Chadd ATH_PCU_LOCK(sc); 567868e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 567968e8e04eSSam Leffler ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5680c98cefc5SAdrian Chadd ATH_PCU_UNLOCK(sc); 568168e8e04eSSam Leffler 568268e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 568368e8e04eSSam Leffler __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 568468e8e04eSSam Leffler } 568568e8e04eSSam Leffler 568668e8e04eSSam Leffler static void 568768e8e04eSSam Leffler ath_scan_end(struct ieee80211com *ic) 568868e8e04eSSam Leffler { 568968e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 569068e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 569168e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 569268e8e04eSSam Leffler u_int32_t rfilt; 569368e8e04eSSam Leffler 5694c98cefc5SAdrian Chadd ATH_LOCK(sc); 569568e8e04eSSam Leffler sc->sc_scanning = 0; 569668e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 5697c98cefc5SAdrian Chadd ATH_UNLOCK(sc); 5698c98cefc5SAdrian Chadd 5699c98cefc5SAdrian Chadd ATH_PCU_LOCK(sc); 570068e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 570168e8e04eSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 570268e8e04eSSam Leffler 570368e8e04eSSam Leffler ath_hal_process_noisefloor(ah); 5704c98cefc5SAdrian Chadd ATH_PCU_UNLOCK(sc); 570568e8e04eSSam Leffler 570668e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 570768e8e04eSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), 570868e8e04eSSam Leffler sc->sc_curaid); 570968e8e04eSSam Leffler } 571068e8e04eSSam Leffler 571168e8e04eSSam Leffler static void 571268e8e04eSSam Leffler ath_set_channel(struct ieee80211com *ic) 571368e8e04eSSam Leffler { 571468e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 571568e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 571668e8e04eSSam Leffler 571768e8e04eSSam Leffler (void) ath_chan_set(sc, ic->ic_curchan); 571868e8e04eSSam Leffler /* 571968e8e04eSSam Leffler * If we are returning to our bss channel then mark state 572068e8e04eSSam Leffler * so the next recv'd beacon's tsf will be used to sync the 572168e8e04eSSam Leffler * beacon timers. Note that since we only hear beacons in 572268e8e04eSSam Leffler * sta/ibss mode this has no effect in other operating modes. 572368e8e04eSSam Leffler */ 5724a887b1e3SAdrian Chadd ATH_LOCK(sc); 572568e8e04eSSam Leffler if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 572668e8e04eSSam Leffler sc->sc_syncbeacon = 1; 5727a887b1e3SAdrian Chadd ATH_UNLOCK(sc); 572868e8e04eSSam Leffler } 572968e8e04eSSam Leffler 5730b032f27cSSam Leffler /* 5731b032f27cSSam Leffler * Walk the vap list and check if there are any vap's in RUN state.
5732b032f27cSSam Leffler */ 57335591b213SSam Leffler static int 5734b032f27cSSam Leffler ath_isanyrunningvaps(struct ieee80211vap *this) 57355591b213SSam Leffler { 5736b032f27cSSam Leffler struct ieee80211com *ic = this->iv_ic; 5737b032f27cSSam Leffler struct ieee80211vap *vap; 5738b032f27cSSam Leffler 5739b032f27cSSam Leffler IEEE80211_LOCK_ASSERT(ic); 5740b032f27cSSam Leffler 5741b032f27cSSam Leffler TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5742309a3e45SSam Leffler if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5743b032f27cSSam Leffler return 1; 5744b032f27cSSam Leffler } 5745b032f27cSSam Leffler return 0; 5746b032f27cSSam Leffler } 5747b032f27cSSam Leffler 5748b032f27cSSam Leffler static int 5749b032f27cSSam Leffler ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5750b032f27cSSam Leffler { 5751b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 5752b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5753b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 575445bbf62fSSam Leffler struct ath_hal *ah = sc->sc_ah; 5755b032f27cSSam Leffler struct ieee80211_node *ni = NULL; 575668e8e04eSSam Leffler int i, error, stamode; 57575591b213SSam Leffler u_int32_t rfilt; 5758f52efb6dSAdrian Chadd int csa_run_transition = 0; 57595591b213SSam Leffler static const HAL_LED_STATE leds[] = { 57605591b213SSam Leffler HAL_LED_INIT, /* IEEE80211_S_INIT */ 57615591b213SSam Leffler HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 57625591b213SSam Leffler HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 57635591b213SSam Leffler HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 576477d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CAC */ 57655591b213SSam Leffler HAL_LED_RUN, /* IEEE80211_S_RUN */ 576677d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CSA */ 576777d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 57685591b213SSam Leffler }; 57695591b213SSam Leffler 5770c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5771b032f27cSSam Leffler ieee80211_state_name[vap->iv_state], 5772c42a7b7eSSam Leffler ieee80211_state_name[nstate]); 57735591b213SSam Leffler 5774107fdf96SAdrian Chadd /* 5775107fdf96SAdrian Chadd * net80211 _should_ have the comlock asserted at this point. 5776107fdf96SAdrian Chadd * There are some comments around the calls to vap->iv_newstate 5777107fdf96SAdrian Chadd * which indicate that it (newstate) may end up dropping the 5778107fdf96SAdrian Chadd * lock. This and the subsequent lock assert check after newstate 5779107fdf96SAdrian Chadd * are an attempt to catch these and figure out how/why. 5780107fdf96SAdrian Chadd */ 5781107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 5782107fdf96SAdrian Chadd 5783f52efb6dSAdrian Chadd if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5784f52efb6dSAdrian Chadd csa_run_transition = 1; 5785f52efb6dSAdrian Chadd 57862e986da5SSam Leffler callout_drain(&sc->sc_cal_ch); 57875591b213SSam Leffler ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 57885591b213SSam Leffler 5789b032f27cSSam Leffler if (nstate == IEEE80211_S_SCAN) { 579058769f58SSam Leffler /* 5791b032f27cSSam Leffler * Scanning: turn off beacon miss and don't beacon. 5792b032f27cSSam Leffler * Mark beacon state so when we reach RUN state we'll 5793b032f27cSSam Leffler * [re]setup beacons. Unblock the task q thread so 5794b032f27cSSam Leffler * deferred interrupt processing is done. 
579558769f58SSam Leffler */ 5796b032f27cSSam Leffler ath_hal_intrset(ah, 5797b032f27cSSam Leffler sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 57985591b213SSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5799b032f27cSSam Leffler sc->sc_beacons = 0; 5800b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 58015591b213SSam Leffler } 58025591b213SSam Leffler 580380767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 580468e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 5805b032f27cSSam Leffler stamode = (vap->iv_opmode == IEEE80211_M_STA || 58067b916f89SSam Leffler vap->iv_opmode == IEEE80211_M_AHDEMO || 5807b032f27cSSam Leffler vap->iv_opmode == IEEE80211_M_IBSS); 580868e8e04eSSam Leffler if (stamode && nstate == IEEE80211_S_RUN) { 580968e8e04eSSam Leffler sc->sc_curaid = ni->ni_associd; 581068e8e04eSSam Leffler IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5811b032f27cSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5812b032f27cSSam Leffler } 581368e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5814b032f27cSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 581568e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 581668e8e04eSSam Leffler 5817b032f27cSSam Leffler /* XXX is this to restore keycache on resume? */ 5818b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_STA && 5819b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY)) { 58205591b213SSam Leffler for (i = 0; i < IEEE80211_WEP_NKID; i++) 58215591b213SSam Leffler if (ath_hal_keyisvalid(ah, i)) 582268e8e04eSSam Leffler ath_hal_keysetmac(ah, i, ni->ni_bssid); 58235591b213SSam Leffler } 5824b032f27cSSam Leffler 5825b032f27cSSam Leffler /* 5826b032f27cSSam Leffler * Invoke the parent method to do net80211 work. 5827b032f27cSSam Leffler */ 5828b032f27cSSam Leffler error = avp->av_newstate(vap, nstate, arg); 5829b032f27cSSam Leffler if (error != 0) 5830b032f27cSSam Leffler goto bad; 5831c42a7b7eSSam Leffler 5832107fdf96SAdrian Chadd /* 5833107fdf96SAdrian Chadd * See above: ensure av_newstate() doesn't drop the lock 5834107fdf96SAdrian Chadd * on us. 5835107fdf96SAdrian Chadd */ 5836107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 5837107fdf96SAdrian Chadd 583868e8e04eSSam Leffler if (nstate == IEEE80211_S_RUN) { 5839b032f27cSSam Leffler /* NB: collect bss node again, it may have changed */ 584080767531SAdrian Chadd ieee80211_free_node(ni); 584180767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 58425591b213SSam Leffler 5843b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 5844b032f27cSSam Leffler "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5845b032f27cSSam Leffler "capinfo 0x%04x chan %d\n", __func__, 5846b032f27cSSam Leffler vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5847b032f27cSSam Leffler ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5848b032f27cSSam Leffler 5849b032f27cSSam Leffler switch (vap->iv_opmode) { 5850584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 585110ad9a77SSam Leffler case IEEE80211_M_AHDEMO: 585210ad9a77SSam Leffler if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 585310ad9a77SSam Leffler break; 585410ad9a77SSam Leffler /* fall thru... */ 585510ad9a77SSam Leffler #endif 5856e8fd88a3SSam Leffler case IEEE80211_M_HOSTAP: 5857e8fd88a3SSam Leffler case IEEE80211_M_IBSS: 585859aa14a9SRui Paulo case IEEE80211_M_MBSS: 58595591b213SSam Leffler /* 5860e8fd88a3SSam Leffler * Allocate and setup the beacon frame. 
5861e8fd88a3SSam Leffler * 5862f818612bSSam Leffler * Stop any previous beacon DMA. This may be 5863f818612bSSam Leffler * necessary, for example, when an ibss merge 5864f818612bSSam Leffler * causes reconfiguration; there will be a state 5865f818612bSSam Leffler * transition from RUN->RUN that means we may 5866f818612bSSam Leffler * be called with beacon transmission active. 5867f818612bSSam Leffler */ 5868f818612bSSam Leffler ath_hal_stoptxdma(ah, sc->sc_bhalq); 5869b032f27cSSam Leffler 58705591b213SSam Leffler error = ath_beacon_alloc(sc, ni); 58715591b213SSam Leffler if (error != 0) 58725591b213SSam Leffler goto bad; 58737a04dc27SSam Leffler /* 587480d939bfSSam Leffler * If joining an adhoc network defer beacon timer 587580d939bfSSam Leffler * configuration to the next beacon frame so we 587680d939bfSSam Leffler * have a current TSF to use. Otherwise we're 5877b032f27cSSam Leffler * starting an ibss/bss so there's no need to delay; 5878b032f27cSSam Leffler * if this is the first vap moving to RUN state, then 5879b032f27cSSam Leffler * beacon state needs to be [re]configured. 58807a04dc27SSam Leffler */ 5881b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS && 5882b032f27cSSam Leffler ni->ni_tstamp.tsf != 0) { 588380d939bfSSam Leffler sc->sc_syncbeacon = 1; 5884b032f27cSSam Leffler } else if (!sc->sc_beacons) { 5885584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 588610ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) 588710ad9a77SSam Leffler ath_tdma_config(sc, vap); 588810ad9a77SSam Leffler else 588910ad9a77SSam Leffler #endif 5890b032f27cSSam Leffler ath_beacon_config(sc, vap); 5891b032f27cSSam Leffler sc->sc_beacons = 1; 5892b032f27cSSam Leffler } 5893e8fd88a3SSam Leffler break; 5894e8fd88a3SSam Leffler case IEEE80211_M_STA: 5895e8fd88a3SSam Leffler /* 589680d939bfSSam Leffler * Defer beacon timer configuration to the next 589780d939bfSSam Leffler * beacon frame so we have a current TSF to use 589880d939bfSSam Leffler * (any TSF collected when scanning is likely old). 5899f52efb6dSAdrian Chadd * However if it's due to a CSA -> RUN transition, 5900f52efb6dSAdrian Chadd * force a beacon update so we pick up a lack of 5901f52efb6dSAdrian Chadd * beacons from an AP in CAC and thus force a 5902f52efb6dSAdrian Chadd * scan. 59037a04dc27SSam Leffler */ 590480d939bfSSam Leffler sc->sc_syncbeacon = 1; 5905f52efb6dSAdrian Chadd if (csa_run_transition) 5906f52efb6dSAdrian Chadd ath_beacon_config(sc, vap); 5907e8fd88a3SSam Leffler break; 5908b032f27cSSam Leffler case IEEE80211_M_MONITOR: 5909b032f27cSSam Leffler /* 5910b032f27cSSam Leffler * Monitor mode vaps have only INIT->RUN and RUN->RUN 5911b032f27cSSam Leffler * transitions so we must re-enable interrupts here to 5912b032f27cSSam Leffler * handle the case of a single monitor mode vap. 5913b032f27cSSam Leffler */ 5914b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5915b032f27cSSam Leffler break; 5916b032f27cSSam Leffler case IEEE80211_M_WDS: 5917b032f27cSSam Leffler break; 5918e8fd88a3SSam Leffler default: 5919e8fd88a3SSam Leffler break; 59205591b213SSam Leffler } 59215591b213SSam Leffler /* 59227b0c77ecSSam Leffler * Let the hal process statistics collected during a 59237b0c77ecSSam Leffler * scan so it can provide calibrated noise floor data. 59247b0c77ecSSam Leffler */ 59257b0c77ecSSam Leffler ath_hal_process_noisefloor(ah); 59267b0c77ecSSam Leffler /* 5927ffa2cab6SSam Leffler * Reset rssi stats; maybe not the best place... 
5928ffa2cab6SSam Leffler */
5929ffa2cab6SSam Leffler sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5930ffa2cab6SSam Leffler sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5931ffa2cab6SSam Leffler sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
593245bbf62fSSam Leffler /*
5933b032f27cSSam Leffler * Finally, start any timers and the task q thread
5934b032f27cSSam Leffler * (in case we didn't go through SCAN state).
593545bbf62fSSam Leffler */
59362dc7fcc4SSam Leffler if (ath_longcalinterval != 0) {
5937c42a7b7eSSam Leffler /* start periodic recalibration timer */
59382dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
59392dc7fcc4SSam Leffler } else {
59402dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE,
59412dc7fcc4SSam Leffler "%s: calibration disabled\n", __func__);
5942c42a7b7eSSam Leffler }
5943b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq);
5944b032f27cSSam Leffler } else if (nstate == IEEE80211_S_INIT) {
5945b032f27cSSam Leffler /*
5946b032f27cSSam Leffler * If there are no vaps left in RUN state then
5947b032f27cSSam Leffler * shutdown host/driver operation:
5948b032f27cSSam Leffler * o disable interrupts
5949b032f27cSSam Leffler * o disable the task queue thread
5950b032f27cSSam Leffler * o mark beacon processing as stopped
5951b032f27cSSam Leffler */
5952b032f27cSSam Leffler if (!ath_isanyrunningvaps(vap)) {
5953b032f27cSSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5954b032f27cSSam Leffler /* disable interrupts */
5955b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5956b032f27cSSam Leffler taskqueue_block(sc->sc_tq);
5957b032f27cSSam Leffler sc->sc_beacons = 0;
5958b032f27cSSam Leffler }
5959584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA
596010ad9a77SSam Leffler ath_hal_setcca(ah, AH_TRUE);
596110ad9a77SSam Leffler #endif
5962b032f27cSSam Leffler }
59635591b213SSam Leffler bad:
596480767531SAdrian Chadd ieee80211_free_node(ni);
59655591b213SSam Leffler return error;
59665591b213SSam Leffler }
59675591b213SSam Leffler
59685591b213SSam Leffler /*
5969e8fd88a3SSam Leffler * Allocate a key cache slot to the station so we can
5970e8fd88a3SSam Leffler * setup a mapping from key index to node. The key cache
5971e8fd88a3SSam Leffler * slot is needed for managing antenna state and for
5972e8fd88a3SSam Leffler * compression when stations do not use crypto. We do
5973e8fd88a3SSam Leffler * it unilaterally here; if crypto is employed this slot
5974e8fd88a3SSam Leffler * will be reassigned.
5975e8fd88a3SSam Leffler */
5976e8fd88a3SSam Leffler static void
5977e8fd88a3SSam Leffler ath_setup_stationkey(struct ieee80211_node *ni)
5978e8fd88a3SSam Leffler {
5979b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap;
5980b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5981c1225b52SSam Leffler ieee80211_keyix keyix, rxkeyix;
5982e8fd88a3SSam Leffler
598380767531SAdrian Chadd /* XXX should take a locked ref to vap->iv_bss */
5984b032f27cSSam Leffler if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5985e8fd88a3SSam Leffler /*
5986e8fd88a3SSam Leffler * Key cache is full; we'll fall back to doing
5987e8fd88a3SSam Leffler * the more expensive lookup in software. Note
5988e8fd88a3SSam Leffler * this also means no h/w compression.
5989e8fd88a3SSam Leffler */
5990e8fd88a3SSam Leffler /* XXX msg+statistic */
5991e8fd88a3SSam Leffler } else {
5992c1225b52SSam Leffler /* XXX locking?
*/ 5993e8fd88a3SSam Leffler ni->ni_ucastkey.wk_keyix = keyix; 5994c1225b52SSam Leffler ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 599533052833SSam Leffler /* NB: must mark device key to get called back on delete */ 599633052833SSam Leffler ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 5997d3ac945bSSam Leffler IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5998e8fd88a3SSam Leffler /* NB: this will create a pass-thru key entry */ 599955c7b877SAdrian Chadd ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 6000e8fd88a3SSam Leffler } 6001e8fd88a3SSam Leffler } 6002e8fd88a3SSam Leffler 6003e8fd88a3SSam Leffler /* 60045591b213SSam Leffler * Setup driver-specific state for a newly associated node. 60055591b213SSam Leffler * Note that we're called also on a re-associate, the isnew 60065591b213SSam Leffler * param tells us if this is the first time or not. 60075591b213SSam Leffler */ 60085591b213SSam Leffler static void 6009e9962332SSam Leffler ath_newassoc(struct ieee80211_node *ni, int isnew) 60105591b213SSam Leffler { 6011b032f27cSSam Leffler struct ath_node *an = ATH_NODE(ni); 6012b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 6013b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 6014c62362cbSSam Leffler const struct ieee80211_txparam *tp = ni->ni_txparms; 60155591b213SSam Leffler 6016ab06fdf2SSam Leffler an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 6017ab06fdf2SSam Leffler an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 6018b032f27cSSam Leffler 6019b032f27cSSam Leffler ath_rate_newassoc(sc, an, isnew); 6020e8fd88a3SSam Leffler if (isnew && 6021b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 6022b032f27cSSam Leffler ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 6023e8fd88a3SSam Leffler ath_setup_stationkey(ni); 6024e8fd88a3SSam Leffler } 60255591b213SSam Leffler 60265591b213SSam Leffler static int 602759efa8b5SSam Leffler ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 6028b032f27cSSam Leffler int nchans, struct ieee80211_channel chans[]) 6029b032f27cSSam Leffler { 6030b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 6031b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 603259efa8b5SSam Leffler HAL_STATUS status; 6033b032f27cSSam Leffler 6034033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 603559efa8b5SSam Leffler "%s: rd %u cc %u location %c%s\n", 603659efa8b5SSam Leffler __func__, reg->regdomain, reg->country, reg->location, 603759efa8b5SSam Leffler reg->ecm ? 
" ecm" : ""); 6038033022a9SSam Leffler 603959efa8b5SSam Leffler status = ath_hal_set_channels(ah, chans, nchans, 604059efa8b5SSam Leffler reg->country, reg->regdomain); 604159efa8b5SSam Leffler if (status != HAL_OK) { 604259efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 604359efa8b5SSam Leffler __func__, status); 604459efa8b5SSam Leffler return EINVAL; /* XXX */ 6045b032f27cSSam Leffler } 60468db87e40SAdrian Chadd 6047b032f27cSSam Leffler return 0; 6048b032f27cSSam Leffler } 6049b032f27cSSam Leffler 6050b032f27cSSam Leffler static void 6051b032f27cSSam Leffler ath_getradiocaps(struct ieee80211com *ic, 60525fe9f044SSam Leffler int maxchans, int *nchans, struct ieee80211_channel chans[]) 6053b032f27cSSam Leffler { 6054b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 6055b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 6056b032f27cSSam Leffler 605759efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 605859efa8b5SSam Leffler __func__, SKU_DEBUG, CTRY_DEFAULT); 6059033022a9SSam Leffler 606059efa8b5SSam Leffler /* XXX check return */ 606159efa8b5SSam Leffler (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 606259efa8b5SSam Leffler HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 6063033022a9SSam Leffler 6064b032f27cSSam Leffler } 6065b032f27cSSam Leffler 6066b032f27cSSam Leffler static int 6067b032f27cSSam Leffler ath_getchannels(struct ath_softc *sc) 6068b032f27cSSam Leffler { 6069b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 6070b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 6071b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 607259efa8b5SSam Leffler HAL_STATUS status; 6073b032f27cSSam Leffler 6074b032f27cSSam Leffler /* 607559efa8b5SSam Leffler * Collect channel set based on EEPROM contents. 6076b032f27cSSam Leffler */ 607759efa8b5SSam Leffler status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 607859efa8b5SSam Leffler &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 607959efa8b5SSam Leffler if (status != HAL_OK) { 608059efa8b5SSam Leffler if_printf(ifp, "%s: unable to collect channel list from hal, " 608159efa8b5SSam Leffler "status %d\n", __func__, status); 608259efa8b5SSam Leffler return EINVAL; 608359efa8b5SSam Leffler } 6084ca876918SSam Leffler (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 6085ca876918SSam Leffler ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 608659efa8b5SSam Leffler /* XXX map Atheros sku's to net80211 SKU's */ 608759efa8b5SSam Leffler /* XXX net80211 types too small */ 608859efa8b5SSam Leffler ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 608959efa8b5SSam Leffler ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 609059efa8b5SSam Leffler ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 609159efa8b5SSam Leffler ic->ic_regdomain.isocc[1] = ' '; 609259efa8b5SSam Leffler 6093b032f27cSSam Leffler ic->ic_regdomain.ecm = 1; 6094b032f27cSSam Leffler ic->ic_regdomain.location = 'I'; 6095033022a9SSam Leffler 6096033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 609759efa8b5SSam Leffler "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 6098033022a9SSam Leffler __func__, sc->sc_eerd, sc->sc_eecc, 6099033022a9SSam Leffler ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 610059efa8b5SSam Leffler ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 61015591b213SSam Leffler return 0; 61025591b213SSam Leffler } 61035591b213SSam Leffler 61046c4612b9SSam Leffler static int 61056c4612b9SSam Leffler ath_rate_setup(struct ath_softc *sc, u_int mode) 61066c4612b9SSam Leffler { 61076c4612b9SSam Leffler struct ath_hal *ah = sc->sc_ah; 61086c4612b9SSam Leffler const HAL_RATE_TABLE *rt; 61096c4612b9SSam Leffler 61106c4612b9SSam Leffler switch (mode) { 61116c4612b9SSam Leffler case IEEE80211_MODE_11A: 61126c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A); 61136c4612b9SSam Leffler break; 6114724c193aSSam Leffler case IEEE80211_MODE_HALF: 6115aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6116aaa70f2fSSam Leffler break; 6117724c193aSSam Leffler case IEEE80211_MODE_QUARTER: 6118aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6119aaa70f2fSSam Leffler break; 61206c4612b9SSam Leffler case IEEE80211_MODE_11B: 61216c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11B); 61226c4612b9SSam Leffler break; 61236c4612b9SSam Leffler case IEEE80211_MODE_11G: 61246c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11G); 61256c4612b9SSam Leffler break; 61266c4612b9SSam Leffler case IEEE80211_MODE_TURBO_A: 612768e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108A); 61286c4612b9SSam Leffler break; 61296c4612b9SSam Leffler case IEEE80211_MODE_TURBO_G: 61306c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108G); 61316c4612b9SSam Leffler break; 613268e8e04eSSam Leffler case IEEE80211_MODE_STURBO_A: 613368e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 613468e8e04eSSam Leffler break; 613568e8e04eSSam Leffler case IEEE80211_MODE_11NA: 613668e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 613768e8e04eSSam Leffler break; 613868e8e04eSSam Leffler case IEEE80211_MODE_11NG: 613968e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 614068e8e04eSSam Leffler break; 61416c4612b9SSam Leffler default: 61426c4612b9SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 61436c4612b9SSam Leffler __func__, mode); 61446c4612b9SSam Leffler return 0; 61456c4612b9SSam Leffler } 61466c4612b9SSam Leffler sc->sc_rates[mode] = rt; 6147aaa70f2fSSam Leffler return (rt != NULL); 61485591b213SSam Leffler } 61495591b213SSam Leffler 61505591b213SSam Leffler static void 61515591b213SSam Leffler ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 61525591b213SSam Leffler { 61533e50ec2cSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 61543e50ec2cSSam Leffler /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 61553e50ec2cSSam Leffler static const struct { 61563e50ec2cSSam Leffler u_int rate; /* tx/rx 802.11 rate */ 61573e50ec2cSSam Leffler u_int16_t timeOn; /* LED on time (ms) */ 61583e50ec2cSSam Leffler u_int16_t timeOff; /* LED off time (ms) */ 61593e50ec2cSSam Leffler } blinkrates[] = { 61603e50ec2cSSam Leffler { 108, 40, 10 }, 61613e50ec2cSSam Leffler { 96, 44, 11 }, 61623e50ec2cSSam Leffler { 72, 50, 13 }, 61633e50ec2cSSam Leffler { 48, 57, 14 }, 61643e50ec2cSSam Leffler { 36, 67, 16 }, 61653e50ec2cSSam Leffler { 24, 80, 20 }, 61663e50ec2cSSam Leffler { 22, 100, 25 }, 61673e50ec2cSSam Leffler { 18, 133, 34 }, 61683e50ec2cSSam Leffler { 12, 160, 40 }, 61693e50ec2cSSam Leffler { 10, 200, 50 }, 61703e50ec2cSSam Leffler { 6, 240, 58 }, 61713e50ec2cSSam Leffler { 4, 267, 66 }, 61723e50ec2cSSam Leffler { 2, 400, 100 }, 61733e50ec2cSSam Leffler { 0, 500, 130 }, 6174724c193aSSam 
Leffler /* XXX half/quarter rates */
61753e50ec2cSSam Leffler };
61765591b213SSam Leffler const HAL_RATE_TABLE *rt;
61773e50ec2cSSam Leffler int i, j;
61785591b213SSam Leffler
61795591b213SSam Leffler memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
61805591b213SSam Leffler rt = sc->sc_rates[mode];
61815591b213SSam Leffler KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6182180f268dSSam Leffler for (i = 0; i < rt->rateCount; i++) {
6183180f268dSSam Leffler uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6184180f268dSSam Leffler if (rt->info[i].phy != IEEE80211_T_HT)
6185180f268dSSam Leffler sc->sc_rixmap[ieeerate] = i;
6186180f268dSSam Leffler else
6187180f268dSSam Leffler sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6188180f268dSSam Leffler }
61891b1a8e41SSam Leffler memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
619046d4d74cSSam Leffler for (i = 0; i < N(sc->sc_hwmap); i++) {
619146d4d74cSSam Leffler if (i >= rt->rateCount) {
61923e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
61933e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
619416b4851aSSam Leffler continue;
61953e50ec2cSSam Leffler }
61963e50ec2cSSam Leffler sc->sc_hwmap[i].ieeerate =
619746d4d74cSSam Leffler rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
619846d4d74cSSam Leffler if (rt->info[i].phy == IEEE80211_T_HT)
619926041a14SSam Leffler sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6200d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
620146d4d74cSSam Leffler if (rt->info[i].shortPreamble ||
620246d4d74cSSam Leffler rt->info[i].phy == IEEE80211_T_OFDM)
6203d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
62045463c4a4SSam Leffler sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
62053e50ec2cSSam Leffler for (j = 0; j < N(blinkrates)-1; j++)
62063e50ec2cSSam Leffler if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
62073e50ec2cSSam Leffler break;
62083e50ec2cSSam Leffler /* NB: this uses the last entry if the rate isn't found */
62093e50ec2cSSam Leffler /* XXX beware of overflow */
62103e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
62113e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6212c42a7b7eSSam Leffler }
62135591b213SSam Leffler sc->sc_currates = rt;
62145591b213SSam Leffler sc->sc_curmode = mode;
62155591b213SSam Leffler /*
6216c42a7b7eSSam Leffler * All protection frames are transmitted at 2Mb/s for
6217c42a7b7eSSam Leffler * 11g, otherwise at 1Mb/s.
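 * (Illustrative note added here, not in the original source: the rate
 *  argument to ath_tx_findrix() below appears to be expressed in
 *  net80211's 0.5 Mb/s units, which is why 2 Mb/s is written as 2*2
 *  and 1 Mb/s as 2*1.)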
62185591b213SSam Leffler */
6219913a1ba1SSam Leffler if (mode == IEEE80211_MODE_11G)
6220ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6221913a1ba1SSam Leffler else
6222ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*1);
62234fa8d4efSDaniel Eischen /* NB: caller is responsible for resetting rate control state */
62243e50ec2cSSam Leffler #undef N
62255591b213SSam Leffler }
62265591b213SSam Leffler
6227c42a7b7eSSam Leffler static void
62282e986da5SSam Leffler ath_watchdog(void *arg)
6229c42a7b7eSSam Leffler {
62302e986da5SSam Leffler struct ath_softc *sc = arg;
6231ef27340cSAdrian Chadd int do_reset = 0;
6232c42a7b7eSSam Leffler
62332e986da5SSam Leffler if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
62342e986da5SSam Leffler struct ifnet *ifp = sc->sc_ifp;
6235459bc4f0SSam Leffler uint32_t hangs;
6236459bc4f0SSam Leffler
6237459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6238459bc4f0SSam Leffler hangs != 0) {
6239459bc4f0SSam Leffler if_printf(ifp, "%s hang detected (0x%x)\n",
6240459bc4f0SSam Leffler hangs & 0xff ? "bb" : "mac", hangs);
6241459bc4f0SSam Leffler } else
6242c42a7b7eSSam Leffler if_printf(ifp, "device timeout\n");
6243ef27340cSAdrian Chadd do_reset = 1;
6244c42a7b7eSSam Leffler ifp->if_oerrors++;
6245c42a7b7eSSam Leffler sc->sc_stats.ast_watchdog++;
6246c42a7b7eSSam Leffler }
6247ef27340cSAdrian Chadd
6248ef27340cSAdrian Chadd /*
6249ef27340cSAdrian Chadd * We can't hold the lock across the ath_reset() call.
6250d52f7132SAdrian Chadd *
6251d52f7132SAdrian Chadd * And since this routine can't hold a lock and sleep,
6252d52f7132SAdrian Chadd * do the reset deferred.
6253ef27340cSAdrian Chadd */
6254ef27340cSAdrian Chadd if (do_reset) {
6255d52f7132SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
6256ef27340cSAdrian Chadd }
6257ef27340cSAdrian Chadd
62582e986da5SSam Leffler callout_schedule(&sc->sc_wd_ch, hz);
6259c42a7b7eSSam Leffler }
6260c42a7b7eSSam Leffler
6261a585a9a1SSam Leffler #ifdef ATH_DIAGAPI
6262c42a7b7eSSam Leffler /*
6263c42a7b7eSSam Leffler * Diagnostic interface to the HAL. This is used by various
6264c42a7b7eSSam Leffler * tools to do things like retrieve register contents for
6265c42a7b7eSSam Leffler * debugging. The mechanism is intentionally opaque so that
6266c42a7b7eSSam Leffler * it can change frequently w/o concern for compatibility.
6267c42a7b7eSSam Leffler */
6268c42a7b7eSSam Leffler static int
6269c42a7b7eSSam Leffler ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6270c42a7b7eSSam Leffler {
6271c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah;
6272c42a7b7eSSam Leffler u_int id = ad->ad_id & ATH_DIAG_ID;
6273c42a7b7eSSam Leffler void *indata = NULL;
6274c42a7b7eSSam Leffler void *outdata = NULL;
6275c42a7b7eSSam Leffler u_int32_t insize = ad->ad_in_size;
6276c42a7b7eSSam Leffler u_int32_t outsize = ad->ad_out_size;
6277c42a7b7eSSam Leffler int error = 0;
6278c42a7b7eSSam Leffler
6279c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_IN) {
6280c42a7b7eSSam Leffler /*
6281c42a7b7eSSam Leffler * Copy in data.
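 * (Added note: ad_in_data/ad_in_size describe a buffer supplied by the
 *  userland diagnostic tool; the code below bounce-buffers it through
 *  malloc(9) and copyin(9) before handing it to ath_hal_getdiagstate().)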
6282c42a7b7eSSam Leffler */ 6283c42a7b7eSSam Leffler indata = malloc(insize, M_TEMP, M_NOWAIT); 6284c42a7b7eSSam Leffler if (indata == NULL) { 6285c42a7b7eSSam Leffler error = ENOMEM; 6286c42a7b7eSSam Leffler goto bad; 6287c42a7b7eSSam Leffler } 6288c42a7b7eSSam Leffler error = copyin(ad->ad_in_data, indata, insize); 6289c42a7b7eSSam Leffler if (error) 6290c42a7b7eSSam Leffler goto bad; 6291c42a7b7eSSam Leffler } 6292c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_DYN) { 6293c42a7b7eSSam Leffler /* 6294c42a7b7eSSam Leffler * Allocate a buffer for the results (otherwise the HAL 6295c42a7b7eSSam Leffler * returns a pointer to a buffer where we can read the 6296c42a7b7eSSam Leffler * results). Note that we depend on the HAL leaving this 6297c42a7b7eSSam Leffler * pointer for us to use below in reclaiming the buffer; 6298c42a7b7eSSam Leffler * may want to be more defensive. 6299c42a7b7eSSam Leffler */ 6300c42a7b7eSSam Leffler outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6301c42a7b7eSSam Leffler if (outdata == NULL) { 6302c42a7b7eSSam Leffler error = ENOMEM; 6303c42a7b7eSSam Leffler goto bad; 6304c42a7b7eSSam Leffler } 6305c42a7b7eSSam Leffler } 6306c42a7b7eSSam Leffler if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6307c42a7b7eSSam Leffler if (outsize < ad->ad_out_size) 6308c42a7b7eSSam Leffler ad->ad_out_size = outsize; 6309c42a7b7eSSam Leffler if (outdata != NULL) 6310c42a7b7eSSam Leffler error = copyout(outdata, ad->ad_out_data, 6311c42a7b7eSSam Leffler ad->ad_out_size); 6312c42a7b7eSSam Leffler } else { 6313c42a7b7eSSam Leffler error = EINVAL; 6314c42a7b7eSSam Leffler } 6315c42a7b7eSSam Leffler bad: 6316c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6317c42a7b7eSSam Leffler free(indata, M_TEMP); 6318c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6319c42a7b7eSSam Leffler free(outdata, M_TEMP); 6320c42a7b7eSSam Leffler return error; 6321c42a7b7eSSam Leffler } 6322a585a9a1SSam Leffler #endif /* ATH_DIAGAPI */ 6323c42a7b7eSSam Leffler 6324c42a7b7eSSam Leffler static int 6325c42a7b7eSSam Leffler ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6326c42a7b7eSSam Leffler { 6327c42a7b7eSSam Leffler #define IS_RUNNING(ifp) \ 632813f4c340SRobert Watson ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6329c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 6330b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 6331c42a7b7eSSam Leffler struct ifreq *ifr = (struct ifreq *)data; 633284784be1SSam Leffler const HAL_RATE_TABLE *rt; 6333c42a7b7eSSam Leffler int error = 0; 6334c42a7b7eSSam Leffler 6335c42a7b7eSSam Leffler switch (cmd) { 6336c42a7b7eSSam Leffler case SIOCSIFFLAGS: 633731a8c1edSAndrew Thompson ATH_LOCK(sc); 6338c42a7b7eSSam Leffler if (IS_RUNNING(ifp)) { 6339c42a7b7eSSam Leffler /* 6340c42a7b7eSSam Leffler * To avoid rescanning another access point, 6341c42a7b7eSSam Leffler * do not call ath_init() here. Instead, 6342c42a7b7eSSam Leffler * only reflect promisc mode settings. 6343c42a7b7eSSam Leffler */ 6344c42a7b7eSSam Leffler ath_mode_init(sc); 6345c42a7b7eSSam Leffler } else if (ifp->if_flags & IFF_UP) { 6346c42a7b7eSSam Leffler /* 6347c42a7b7eSSam Leffler * Beware of being called during attach/detach 6348c42a7b7eSSam Leffler * to reset promiscuous mode. In that case we 6349c42a7b7eSSam Leffler * will still be marked UP but not RUNNING. 
6350c42a7b7eSSam Leffler * However trying to re-init the interface 6351c42a7b7eSSam Leffler * is the wrong thing to do as we've already 6352c42a7b7eSSam Leffler * torn down much of our state. There's 6353c42a7b7eSSam Leffler * probably a better way to deal with this. 6354c42a7b7eSSam Leffler */ 6355b032f27cSSam Leffler if (!sc->sc_invalid) 6356fc74a9f9SBrooks Davis ath_init(sc); /* XXX lose error */ 6357d3ac945bSSam Leffler } else { 6358c42a7b7eSSam Leffler ath_stop_locked(ifp); 6359d3ac945bSSam Leffler #ifdef notyet 6360d3ac945bSSam Leffler /* XXX must wakeup in places like ath_vap_delete */ 6361d3ac945bSSam Leffler if (!sc->sc_invalid) 6362d3ac945bSSam Leffler ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 6363d3ac945bSSam Leffler #endif 6364d3ac945bSSam Leffler } 636531a8c1edSAndrew Thompson ATH_UNLOCK(sc); 6366c42a7b7eSSam Leffler break; 6367b032f27cSSam Leffler case SIOCGIFMEDIA: 6368b032f27cSSam Leffler case SIOCSIFMEDIA: 6369b032f27cSSam Leffler error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6370b032f27cSSam Leffler break; 6371c42a7b7eSSam Leffler case SIOCGATHSTATS: 6372c42a7b7eSSam Leffler /* NB: embed these numbers to get a consistent view */ 6373c42a7b7eSSam Leffler sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6374c42a7b7eSSam Leffler sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 637584784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 637684784be1SSam Leffler sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6377584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 637810ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 637910ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 638010ad9a77SSam Leffler #endif 638184784be1SSam Leffler rt = sc->sc_currates; 638246d4d74cSSam Leffler sc->sc_stats.ast_tx_rate = 638346d4d74cSSam Leffler rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 63846aa113fdSAdrian Chadd if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 63856aa113fdSAdrian Chadd sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6386c42a7b7eSSam Leffler return copyout(&sc->sc_stats, 6387c42a7b7eSSam Leffler ifr->ifr_data, sizeof (sc->sc_stats)); 63883fc21fedSSam Leffler case SIOCZATHSTATS: 63893fc21fedSSam Leffler error = priv_check(curthread, PRIV_DRIVER); 63903fc21fedSSam Leffler if (error == 0) 63913fc21fedSSam Leffler memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 63923fc21fedSSam Leffler break; 6393a585a9a1SSam Leffler #ifdef ATH_DIAGAPI 6394c42a7b7eSSam Leffler case SIOCGATHDIAG: 6395c42a7b7eSSam Leffler error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6396c42a7b7eSSam Leffler break; 6397f51c84eaSAdrian Chadd case SIOCGATHPHYERR: 6398f51c84eaSAdrian Chadd error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6399f51c84eaSAdrian Chadd break; 6400a585a9a1SSam Leffler #endif 640131a8c1edSAndrew Thompson case SIOCGIFADDR: 6402b032f27cSSam Leffler error = ether_ioctl(ifp, cmd, data); 6403c42a7b7eSSam Leffler break; 640431a8c1edSAndrew Thompson default: 640531a8c1edSAndrew Thompson error = EINVAL; 640631a8c1edSAndrew Thompson break; 6407c42a7b7eSSam Leffler } 6408c42a7b7eSSam Leffler return error; 6409a614e076SSam Leffler #undef IS_RUNNING 6410c42a7b7eSSam Leffler } 6411c42a7b7eSSam Leffler 6412c42a7b7eSSam Leffler /* 6413c42a7b7eSSam Leffler * Announce various information on device/driver attach. 
6414c42a7b7eSSam Leffler */ 6415c42a7b7eSSam Leffler static void 6416c42a7b7eSSam Leffler ath_announce(struct ath_softc *sc) 6417c42a7b7eSSam Leffler { 6418fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 6419c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 6420c42a7b7eSSam Leffler 6421498657cfSSam Leffler if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6422498657cfSSam Leffler ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6423498657cfSSam Leffler ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 642446a924c4SAdrian Chadd if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 642546a924c4SAdrian Chadd ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6426c42a7b7eSSam Leffler if (bootverbose) { 6427c42a7b7eSSam Leffler int i; 6428c42a7b7eSSam Leffler for (i = 0; i <= WME_AC_VO; i++) { 6429c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[i]; 6430c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for %s traffic\n", 6431c42a7b7eSSam Leffler txq->axq_qnum, ieee80211_wme_acnames[i]); 6432c42a7b7eSSam Leffler } 6433c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6434c42a7b7eSSam Leffler sc->sc_cabq->axq_qnum); 6435c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6436c42a7b7eSSam Leffler } 6437e2d787faSSam Leffler if (ath_rxbuf != ATH_RXBUF) 6438e2d787faSSam Leffler if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6439e2d787faSSam Leffler if (ath_txbuf != ATH_TXBUF) 6440e2d787faSSam Leffler if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 64419ac01d39SRui Paulo if (sc->sc_mcastkey && bootverbose) 64429ac01d39SRui Paulo if_printf(ifp, "using multicast key search\n"); 6443c42a7b7eSSam Leffler } 644410ad9a77SSam Leffler 6445584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 644610ad9a77SSam Leffler static void 644710ad9a77SSam Leffler ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) 644810ad9a77SSam Leffler { 644910ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 645010ad9a77SSam Leffler HAL_BEACON_TIMERS bt; 645110ad9a77SSam Leffler 645210ad9a77SSam Leffler bt.bt_intval = bintval | HAL_BEACON_ENA; 645310ad9a77SSam Leffler bt.bt_nexttbtt = nexttbtt; 645410ad9a77SSam Leffler bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; 645510ad9a77SSam Leffler bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; 645610ad9a77SSam Leffler bt.bt_nextatim = nexttbtt+1; 6457f3fb1687SAdrian Chadd /* Enables TBTT, DBA, SWBA timers by default */ 6458f3fb1687SAdrian Chadd bt.bt_flags = 0; 645910ad9a77SSam Leffler ath_hal_beaconsettimers(ah, &bt); 646010ad9a77SSam Leffler } 646110ad9a77SSam Leffler 646210ad9a77SSam Leffler /* 646310ad9a77SSam Leffler * Calculate the beacon interval. This is periodic in the 646410ad9a77SSam Leffler * superframe for the bss. We assume each station is configured 646510ad9a77SSam Leffler * identically wrt transmit rate so the guard time we calculate 646610ad9a77SSam Leffler * above will be the same on all stations. Note we need to 646710ad9a77SSam Leffler * factor in the xmit time because the hardware will schedule 646810ad9a77SSam Leffler * a frame for transmit if the start of the frame is within 646910ad9a77SSam Leffler * the burst time. When we get hardware that properly kills 647010ad9a77SSam Leffler * frames in the PCU we can reduce/eliminate the guard time. 647110ad9a77SSam Leffler * 647210ad9a77SSam Leffler * Roundup to 1024 is so we have 1 TU buffer in the guard time 647310ad9a77SSam Leffler * to deal with the granularity of the nexttbtt timer. 
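 * (Worked example with hypothetical numbers, added for illustration: a
 *  10000us slot with 2000us of guard and 2 slots gives
 *  roundup((10000 + 2000) * 2, 1024) = 24576; shifting that right by 10
 *  yields a 24 TU beacon interval, which is already even and so is not
 *  bumped further.)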
11n MAC's 647410ad9a77SSam Leffler * with 1us timer granularity should allow us to reduce/eliminate 647510ad9a77SSam Leffler * this. 647610ad9a77SSam Leffler */ 647710ad9a77SSam Leffler static void 647810ad9a77SSam Leffler ath_tdma_bintvalsetup(struct ath_softc *sc, 647910ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma) 648010ad9a77SSam Leffler { 648110ad9a77SSam Leffler /* copy from vap state (XXX check all vaps have same value?) */ 648210ad9a77SSam Leffler sc->sc_tdmaslotlen = tdma->tdma_slotlen; 648310ad9a77SSam Leffler 648410ad9a77SSam Leffler sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 648510ad9a77SSam Leffler tdma->tdma_slotcnt, 1024); 648610ad9a77SSam Leffler sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 648710ad9a77SSam Leffler if (sc->sc_tdmabintval & 1) 648810ad9a77SSam Leffler sc->sc_tdmabintval++; 648910ad9a77SSam Leffler 649010ad9a77SSam Leffler if (tdma->tdma_slot == 0) { 649110ad9a77SSam Leffler /* 649210ad9a77SSam Leffler * Only slot 0 beacons; other slots respond. 649310ad9a77SSam Leffler */ 649410ad9a77SSam Leffler sc->sc_imask |= HAL_INT_SWBA; 649510ad9a77SSam Leffler sc->sc_tdmaswba = 0; /* beacon immediately */ 649610ad9a77SSam Leffler } else { 649710ad9a77SSam Leffler /* XXX all vaps must be slot 0 or slot !0 */ 649810ad9a77SSam Leffler sc->sc_imask &= ~HAL_INT_SWBA; 649910ad9a77SSam Leffler } 650010ad9a77SSam Leffler } 650110ad9a77SSam Leffler 650210ad9a77SSam Leffler /* 650310ad9a77SSam Leffler * Max 802.11 overhead. This assumes no 4-address frames and 650410ad9a77SSam Leffler * the encapsulation done by ieee80211_encap (llc). We also 650510ad9a77SSam Leffler * include potential crypto overhead. 650610ad9a77SSam Leffler */ 650710ad9a77SSam Leffler #define IEEE80211_MAXOVERHEAD \ 650810ad9a77SSam Leffler (sizeof(struct ieee80211_qosframe) \ 650910ad9a77SSam Leffler + sizeof(struct llc) \ 651010ad9a77SSam Leffler + IEEE80211_ADDR_LEN \ 651110ad9a77SSam Leffler + IEEE80211_WEP_IVLEN \ 651210ad9a77SSam Leffler + IEEE80211_WEP_KIDLEN \ 651310ad9a77SSam Leffler + IEEE80211_WEP_CRCLEN \ 651410ad9a77SSam Leffler + IEEE80211_WEP_MICLEN \ 651510ad9a77SSam Leffler + IEEE80211_CRC_LEN) 651610ad9a77SSam Leffler 651710ad9a77SSam Leffler /* 651810ad9a77SSam Leffler * Setup initially for tdma operation. Start the beacon 651910ad9a77SSam Leffler * timers and enable SWBA if we are slot 0. Otherwise 652010ad9a77SSam Leffler * we wait for slot 0 to arrive so we can sync up before 652110ad9a77SSam Leffler * starting to transmit. 
652210ad9a77SSam Leffler */
652310ad9a77SSam Leffler static void
652410ad9a77SSam Leffler ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
652510ad9a77SSam Leffler {
652610ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah;
652710ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp;
652810ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com;
652910ad9a77SSam Leffler const struct ieee80211_txparam *tp;
653010ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma = NULL;
653110ad9a77SSam Leffler int rix;
653210ad9a77SSam Leffler
653310ad9a77SSam Leffler if (vap == NULL) {
653410ad9a77SSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
653510ad9a77SSam Leffler if (vap == NULL) {
653610ad9a77SSam Leffler if_printf(ifp, "%s: no vaps?\n", __func__);
653710ad9a77SSam Leffler return;
653810ad9a77SSam Leffler }
653910ad9a77SSam Leffler }
654080767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */
654110ad9a77SSam Leffler tp = vap->iv_bss->ni_txparms;
654210ad9a77SSam Leffler /*
654310ad9a77SSam Leffler * Calculate the guard time for each slot. This is the
654410ad9a77SSam Leffler * time to send a maximal-size frame according to the
654510ad9a77SSam Leffler * fixed/lowest transmit rate. Note that the interface
654610ad9a77SSam Leffler * mtu does not include the 802.11 overhead so we must
654710ad9a77SSam Leffler * tack that on (ath_hal_computetxtime includes the
654810ad9a77SSam Leffler * preamble and plcp in its calculation).
654910ad9a77SSam Leffler */
655010ad9a77SSam Leffler tdma = vap->iv_tdma;
655110ad9a77SSam Leffler if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
6552ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->ucastrate);
655310ad9a77SSam Leffler else
6554ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->mcastrate);
655510ad9a77SSam Leffler /* XXX short preamble assumed */
655610ad9a77SSam Leffler sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
655710ad9a77SSam Leffler ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
655810ad9a77SSam Leffler
655910ad9a77SSam Leffler ath_hal_intrset(ah, 0);
656010ad9a77SSam Leffler
656110ad9a77SSam Leffler ath_beaconq_config(sc); /* setup h/w beacon q */
65629c859a04SSam Leffler if (sc->sc_setcca)
656310ad9a77SSam Leffler ath_hal_setcca(ah, AH_FALSE); /* disable CCA */
656410ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */
656510ad9a77SSam Leffler ath_tdma_settimers(sc, sc->sc_tdmabintval,
656610ad9a77SSam Leffler sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
656710ad9a77SSam Leffler sc->sc_syncbeacon = 0;
656810ad9a77SSam Leffler
656910ad9a77SSam Leffler sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
657010ad9a77SSam Leffler sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
657110ad9a77SSam Leffler
657210ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask);
657310ad9a77SSam Leffler
657410ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
657510ad9a77SSam Leffler "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
657610ad9a77SSam Leffler tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
657710ad9a77SSam Leffler tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
657810ad9a77SSam Leffler sc->sc_tdmadbaprep);
657910ad9a77SSam Leffler }
658010ad9a77SSam Leffler
658110ad9a77SSam Leffler /*
658210ad9a77SSam Leffler * Update tdma operation. Called from the 802.11 layer
658310ad9a77SSam Leffler * when a beacon is received from the TDMA station operating
658410ad9a77SSam Leffler * in the slot immediately preceding us in the bss.
Use 658510ad9a77SSam Leffler * the rx timestamp for the beacon frame to update our 658610ad9a77SSam Leffler * beacon timers so we follow their schedule. Note that 658710ad9a77SSam Leffler * by using the rx timestamp we implicitly include the 658810ad9a77SSam Leffler * propagation delay in our schedule. 658910ad9a77SSam Leffler */ 659010ad9a77SSam Leffler static void 659110ad9a77SSam Leffler ath_tdma_update(struct ieee80211_node *ni, 65922bc3ce77SSam Leffler const struct ieee80211_tdma_param *tdma, int changed) 659310ad9a77SSam Leffler { 659410ad9a77SSam Leffler #define TSF_TO_TU(_h,_l) \ 659510ad9a77SSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 659610ad9a77SSam Leffler #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 659710ad9a77SSam Leffler struct ieee80211vap *vap = ni->ni_vap; 659810ad9a77SSam Leffler struct ieee80211com *ic = ni->ni_ic; 659910ad9a77SSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 660010ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 660110ad9a77SSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 6602fc4de9b7SAdrian Chadd u_int64_t tsf, rstamp, nextslot, nexttbtt; 6603fc4de9b7SAdrian Chadd u_int32_t txtime, nextslottu; 660410ad9a77SSam Leffler int32_t tudelta, tsfdelta; 660510ad9a77SSam Leffler const struct ath_rx_status *rs; 660610ad9a77SSam Leffler int rix; 660710ad9a77SSam Leffler 660810ad9a77SSam Leffler sc->sc_stats.ast_tdma_update++; 660910ad9a77SSam Leffler 661010ad9a77SSam Leffler /* 661110ad9a77SSam Leffler * Check for and adopt configuration changes. 661210ad9a77SSam Leffler */ 66132bc3ce77SSam Leffler if (changed != 0) { 661410ad9a77SSam Leffler const struct ieee80211_tdma_state *ts = vap->iv_tdma; 661510ad9a77SSam Leffler 661610ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, ts); 6617040972a1SSam Leffler if (changed & TDMA_UPDATE_SLOTLEN) 6618040972a1SSam Leffler ath_wme_update(ic); 661910ad9a77SSam Leffler 662010ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, 662110ad9a77SSam Leffler "%s: adopt slot %u slotcnt %u slotlen %u us " 662210ad9a77SSam Leffler "bintval %u TU\n", __func__, 662310ad9a77SSam Leffler ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 662410ad9a77SSam Leffler sc->sc_tdmabintval); 662510ad9a77SSam Leffler 662610ad9a77SSam Leffler /* XXX right? */ 662710ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 662810ad9a77SSam Leffler /* NB: beacon timers programmed below */ 662910ad9a77SSam Leffler } 663010ad9a77SSam Leffler 663110ad9a77SSam Leffler /* extend rx timestamp to 64 bits */ 66325463c4a4SSam Leffler rs = sc->sc_lastrs; 663310ad9a77SSam Leffler tsf = ath_hal_gettsf64(ah); 6634fc4de9b7SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 663510ad9a77SSam Leffler /* 663610ad9a77SSam Leffler * The rx timestamp is set by the hardware on completing 663710ad9a77SSam Leffler * reception (at the point where the rx descriptor is DMA'd 663810ad9a77SSam Leffler * to the host). To find the start of our next slot we 663910ad9a77SSam Leffler * must adjust this time by the time required to send 664010ad9a77SSam Leffler * the packet just received. 
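 * (Illustrative example with hypothetical numbers: if the received
 *  beacon completed at rstamp = 1000000us and took txtime = 1000us on
 *  the air, its start was at 999000us; with sc_tdmabintval = 24 TU the
 *  code below computes nextslot = 999000 + (24 << 9) = 1011288us, i.e.
 *  half a beacon interval after the start of the received frame.)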
664110ad9a77SSam Leffler */
664210ad9a77SSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate];
664310ad9a77SSam Leffler txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
664410ad9a77SSam Leffler rt->info[rix].shortPreamble);
664510ad9a77SSam Leffler /* NB: << 9 is to cvt to TU and /2 */
664610ad9a77SSam Leffler nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
664710ad9a77SSam Leffler nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;
664810ad9a77SSam Leffler
664910ad9a77SSam Leffler /*
6650fc4de9b7SAdrian Chadd * Retrieve the hardware NextTBTT in usecs
6651fc4de9b7SAdrian Chadd * and calculate the difference between what the
665210ad9a77SSam Leffler * other station thinks and what we have programmed. This
665310ad9a77SSam Leffler * lets us figure how to adjust our timers to match. The
665410ad9a77SSam Leffler * adjustments are done by pulling the TSF forward and possibly
665510ad9a77SSam Leffler * rewriting the beacon timers.
665610ad9a77SSam Leffler */
6657fc4de9b7SAdrian Chadd nexttbtt = ath_hal_getnexttbtt(ah);
6658fc4de9b7SAdrian Chadd tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);
665910ad9a77SSam Leffler
666010ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
666110ad9a77SSam Leffler "tsfdelta %d avg +%d/-%d\n", tsfdelta,
666210ad9a77SSam Leffler TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
666310ad9a77SSam Leffler
666410ad9a77SSam Leffler if (tsfdelta < 0) {
666510ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
666610ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
666710ad9a77SSam Leffler tsfdelta = -tsfdelta % 1024;
666810ad9a77SSam Leffler nextslottu++;
666910ad9a77SSam Leffler } else if (tsfdelta > 0) {
667010ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
667110ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
667210ad9a77SSam Leffler tsfdelta = 1024 - (tsfdelta % 1024);
667310ad9a77SSam Leffler nextslottu++;
667410ad9a77SSam Leffler } else {
667510ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
667610ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
667710ad9a77SSam Leffler }
6678fc4de9b7SAdrian Chadd tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);
667910ad9a77SSam Leffler
668010ad9a77SSam Leffler /*
668110ad9a77SSam Leffler * Copy sender's timestamp into tdma ie so they can
668210ad9a77SSam Leffler * calculate roundtrip time. We submit a beacon frame
668310ad9a77SSam Leffler * below after any timer adjustment. The frame goes out
668410ad9a77SSam Leffler * at the next TBTT so the sender can calculate the
668510ad9a77SSam Leffler * roundtrip by inspecting the tdma ie in our beacon frame.
668610ad9a77SSam Leffler *
668710ad9a77SSam Leffler * NB: This tstamp is subtly preserved when
668810ad9a77SSam Leffler * IEEE80211_BEACON_TDMA is marked (e.g. when the
668910ad9a77SSam Leffler * slot position changes) because ieee80211_add_tdma
669010ad9a77SSam Leffler * skips over the data.
669110ad9a77SSam Leffler */ 669210ad9a77SSam Leffler memcpy(ATH_VAP(vap)->av_boff.bo_tdma + 669310ad9a77SSam Leffler __offsetof(struct ieee80211_tdma_param, tdma_tstamp), 669410ad9a77SSam Leffler &ni->ni_tstamp.data, 8); 669510ad9a77SSam Leffler #if 0 669610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6697fc4de9b7SAdrian Chadd "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", 669810ad9a77SSam Leffler (unsigned long long) tsf, (unsigned long long) nextslot, 6699fc4de9b7SAdrian Chadd (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); 670010ad9a77SSam Leffler #endif 670110ad9a77SSam Leffler /* 670210ad9a77SSam Leffler * Adjust the beacon timers only when pulling them forward 670310ad9a77SSam Leffler * or when going back by less than the beacon interval. 670410ad9a77SSam Leffler * Negative jumps larger than the beacon interval seem to 670510ad9a77SSam Leffler * cause the timers to stop and generally cause instability. 670610ad9a77SSam Leffler * This basically filters out jumps due to missed beacons. 670710ad9a77SSam Leffler */ 670810ad9a77SSam Leffler if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { 670910ad9a77SSam Leffler ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); 671010ad9a77SSam Leffler sc->sc_stats.ast_tdma_timers++; 671110ad9a77SSam Leffler } 671210ad9a77SSam Leffler if (tsfdelta > 0) { 671310ad9a77SSam Leffler ath_hal_adjusttsf(ah, tsfdelta); 671410ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsf++; 671510ad9a77SSam Leffler } 671610ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); /* prepare response */ 671710ad9a77SSam Leffler #undef TU_TO_TSF 671810ad9a77SSam Leffler #undef TSF_TO_TU 671910ad9a77SSam Leffler } 672010ad9a77SSam Leffler 672110ad9a77SSam Leffler /* 672210ad9a77SSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates 672310ad9a77SSam Leffler * to the frame contents are done as needed. 672410ad9a77SSam Leffler */ 672510ad9a77SSam Leffler static void 672610ad9a77SSam Leffler ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) 672710ad9a77SSam Leffler { 672810ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 672910ad9a77SSam Leffler struct ath_buf *bf; 673010ad9a77SSam Leffler int otherant; 673110ad9a77SSam Leffler 673210ad9a77SSam Leffler /* 673310ad9a77SSam Leffler * Check if the previous beacon has gone out. If 673410ad9a77SSam Leffler * not don't try to post another, skip this period 673510ad9a77SSam Leffler * and wait for the next. Missed beacons indicate 673610ad9a77SSam Leffler * a problem and should not occur. If we miss too 673710ad9a77SSam Leffler * many consecutive beacons reset the device. 
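 * (Added note: "too many" here means ath_bstuck_threshold consecutive
 *  misses; the reset is deferred to sc_bstucktask on the driver
 *  taskqueue, as done below.)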
673810ad9a77SSam Leffler */
673910ad9a77SSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
674010ad9a77SSam Leffler sc->sc_bmisscount++;
674110ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON,
674210ad9a77SSam Leffler "%s: missed %u consecutive beacons\n",
674310ad9a77SSam Leffler __func__, sc->sc_bmisscount);
6744a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold)
674510ad9a77SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
674610ad9a77SSam Leffler return;
674710ad9a77SSam Leffler }
674810ad9a77SSam Leffler if (sc->sc_bmisscount != 0) {
674910ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON,
675010ad9a77SSam Leffler "%s: resume beacon xmit after %u misses\n",
675110ad9a77SSam Leffler __func__, sc->sc_bmisscount);
675210ad9a77SSam Leffler sc->sc_bmisscount = 0;
675310ad9a77SSam Leffler }
675410ad9a77SSam Leffler
675510ad9a77SSam Leffler /*
675610ad9a77SSam Leffler * Check recent per-antenna transmit statistics and flip
675710ad9a77SSam Leffler * the default antenna if noticeably more frames went out
675810ad9a77SSam Leffler * on the non-default antenna.
675910ad9a77SSam Leffler * XXX assumes 2 antennae
676010ad9a77SSam Leffler */
676110ad9a77SSam Leffler if (!sc->sc_diversity) {
676210ad9a77SSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1;
676310ad9a77SSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
676410ad9a77SSam Leffler ath_setdefantenna(sc, otherant);
676510ad9a77SSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
676610ad9a77SSam Leffler }
676710ad9a77SSam Leffler
676810ad9a77SSam Leffler bf = ath_beacon_generate(sc, vap);
676910ad9a77SSam Leffler if (bf != NULL) {
677010ad9a77SSam Leffler /*
677110ad9a77SSam Leffler * Stop any current dma and put the new frame on the queue.
677210ad9a77SSam Leffler * This should never fail since we check above that no frames
677310ad9a77SSam Leffler * are still pending on the queue.
677410ad9a77SSam Leffler */
677510ad9a77SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
677610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY,
677710ad9a77SSam Leffler "%s: beacon queue %u did not stop?\n",
677810ad9a77SSam Leffler __func__, sc->sc_bhalq);
677910ad9a77SSam Leffler /* NB: the HAL still stops DMA, so proceed */
678010ad9a77SSam Leffler }
678110ad9a77SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
678210ad9a77SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq);
678310ad9a77SSam Leffler
678410ad9a77SSam Leffler sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */
678510ad9a77SSam Leffler
678610ad9a77SSam Leffler /*
678710ad9a77SSam Leffler * Record local TSF for our last send for use
678810ad9a77SSam Leffler * in arbitrating slot collisions.
678910ad9a77SSam Leffler */ 679080767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 679110ad9a77SSam Leffler vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 679210ad9a77SSam Leffler } 679310ad9a77SSam Leffler } 6794584f7327SSam Leffler #endif /* IEEE80211_SUPPORT_TDMA */ 6795e8dabfbeSAdrian Chadd 679648237774SAdrian Chadd static void 679748237774SAdrian Chadd ath_dfs_tasklet(void *p, int npending) 679848237774SAdrian Chadd { 679948237774SAdrian Chadd struct ath_softc *sc = (struct ath_softc *) p; 680048237774SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 680148237774SAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 680248237774SAdrian Chadd 680348237774SAdrian Chadd /* 680448237774SAdrian Chadd * If previous processing has found a radar event, 680548237774SAdrian Chadd * signal this to the net80211 layer to begin DFS 680648237774SAdrian Chadd * processing. 680748237774SAdrian Chadd */ 680848237774SAdrian Chadd if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 680948237774SAdrian Chadd /* DFS event found, initiate channel change */ 681006fc4a10SAdrian Chadd /* 681106fc4a10SAdrian Chadd * XXX doesn't currently tell us whether the event 681206fc4a10SAdrian Chadd * XXX was found in the primary or extension 681306fc4a10SAdrian Chadd * XXX channel! 681406fc4a10SAdrian Chadd */ 681506fc4a10SAdrian Chadd IEEE80211_LOCK(ic); 681648237774SAdrian Chadd ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 681706fc4a10SAdrian Chadd IEEE80211_UNLOCK(ic); 681848237774SAdrian Chadd } 681948237774SAdrian Chadd } 682048237774SAdrian Chadd 6821dba9c859SAdrian Chadd MODULE_VERSION(if_ath, 1); 6822dba9c859SAdrian Chadd MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6823*58816f3fSAdrian Chadd #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 6824*58816f3fSAdrian Chadd MODULE_DEPEND(if_ath, alq, 1, 1, 1); 6825*58816f3fSAdrian Chadd #endif 6826