/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>    /* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>   /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define ATH_KTR_INTR    KTR_SPARE4
#define ATH_KTR_ERR     KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
            const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
            const uint8_t [IEEE80211_ADDR_LEN],
            const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_vap_delete(struct ieee80211vap *);
static void ath_init(void *);
static void ath_stop_locked(struct ifnet *);
static void ath_stop(struct ifnet *);
static void ath_start(struct ifnet *);
static int ath_reset_vap(struct ieee80211vap *, u_long);
static int ath_media_change(struct ifnet *);
static void ath_watchdog(void *);
static int ath_ioctl(struct ifnet *, u_long, caddr_t);
static void ath_fatal_proc(void *, int);
static void ath_bmiss_vap(struct ieee80211vap *);
static void ath_bmiss_proc(void *, int);
static void ath_key_update_begin(struct ieee80211vap *);
static void ath_key_update_end(struct ieee80211vap *);
static void ath_update_mcast(struct ifnet *);
static void ath_update_promisc(struct ifnet *);
static void ath_mode_init(struct ath_softc *);
static void ath_setslottime(struct ath_softc *);
static void ath_updateslot(struct ifnet *);
static int ath_beaconq_setup(struct ath_hal *);
static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void ath_beacon_update(struct ieee80211vap *, int item);
static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
            struct ieee80211vap *);
static void ath_bstuck_proc(void *, int);
static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void ath_beacon_free(struct ath_softc *);
static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
static void ath_descdma_cleanup(struct ath_softc *sc,
            struct ath_descdma *, ath_bufhead *);
static int ath_desc_alloc(struct ath_softc *);
static void ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
            const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_node_cleanup(struct ieee80211_node *);
static void ath_node_free(struct ieee80211_node *);
static void ath_node_getsignal(const struct ieee80211_node *,
            int8_t *, int8_t *);
static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
            int subtype, int rssi, int nf);
static void ath_setdefantenna(struct ath_softc *, u_int);
static void ath_rx_proc(struct ath_softc *sc, int);
static void ath_rx_tasklet(void *, int);
static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int ath_tx_setup(struct ath_softc *, int, int);
static int ath_wme_update(struct ieee80211com *);
static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void ath_tx_cleanup(struct ath_softc *);
static void ath_tx_proc_q0(void *, int);
static void ath_tx_proc_q0123(void *, int);
static void ath_tx_proc(void *, int);
static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void ath_stoprecv(struct ath_softc *, int);
static int ath_startrecv(struct ath_softc *);
static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void ath_scan_start(struct ieee80211com *);
static void ath_scan_end(struct ieee80211com *);
static void ath_set_channel(struct ieee80211com *);
static void ath_calibrate(void *);
static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void ath_setup_stationkey(struct ieee80211_node *);
static void ath_newassoc(struct ieee80211_node *, int);
static int ath_setregdomain(struct ieee80211com *,
            struct ieee80211_regdomain *, int,
            struct ieee80211_channel []);
static void ath_getradiocaps(struct ieee80211com *, int, int *,
            struct ieee80211_channel []);
static int ath_getchannels(struct ath_softc *);

static int ath_rate_setup(struct ath_softc *, u_int mode);
static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void ath_announce(struct ath_softc *);

static void ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
            u_int32_t bintval);
static void ath_tdma_bintvalsetup(struct ath_softc *sc,
            const struct ieee80211_tdma_state *tdma);
static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void ath_tdma_update(struct ieee80211_node *ni,
            const struct ieee80211_tdma_param *tdma, int);
static void ath_tdma_beacon_send(struct ath_softc *sc,
            struct ieee80211vap *vap);

#define TDMA_EP_MULTIPLIER      (1<<10) /* pow2 to optimize out * and / */
#define TDMA_LPF_LEN            6
#define TDMA_DUMMY_MARKER       0x127
#define TDMA_EP_MUL(x, mul)     ((x) * (mul))
#define TDMA_IN(x)              (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define TDMA_SAMPLE(x, y) do {                                  \
        x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);            \
} while (0)
#define TDMA_EP_RND(x,mul) \
        ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define TDMA_AVG(x)             TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
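/*
 * Worked example of the fixed-point averaging above: TDMA_IN() scales a
 * sample by TDMA_EP_MULTIPLIER (1024) before it is folded into the filter
 * and TDMA_AVG()/TDMA_EP_RND() scale back down with rounding.  With
 * TDMA_LPF_LEN 6, TDMA_SAMPLE(x, y) computes x = (5*x + 1024*y) / 6 once
 * the filter has been seeded; a filter still holding TDMA_DUMMY_MARKER is
 * simply loaded with the (scaled) first sample.
 */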
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;            /* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
            0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;          /* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
            0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;        /* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
            0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;            /* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
            0, "ANI calibration (msecs)");

static int ath_rxbuf = ATH_RXBUF;               /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
            0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static int ath_txbuf = ATH_TXBUF;               /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
            0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static int ath_bstuck_threshold = 4;            /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
            0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
        (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
        HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)

int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
        struct ifnet *ifp;
        struct ieee80211com *ic;
        struct ath_hal *ah = NULL;
        HAL_STATUS status;
        int error = 0, i;
        u_int wmodes;
        uint8_t macaddr[IEEE80211_ADDR_LEN];
        int rx_chainmask, tx_chainmask;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

        ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
        if (ifp == NULL) {
                device_printf(sc->sc_dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto bad;
        }
        ic = ifp->if_l2com;

        /* set these up early for if_printf use */
        if_initname(ifp, device_get_name(sc->sc_dev),
                device_get_unit(sc->sc_dev));

        ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
            sc->sc_eepromdata, &status);
        if (ah == NULL) {
                if_printf(ifp, "unable to attach hardware; HAL status %u\n",
                        status);
                error = ENXIO;
                goto bad;
        }
        sc->sc_ah = ah;
        sc->sc_invalid = 0;     /* ready to go, enable interrupt handling */
#ifdef ATH_DEBUG
        sc->sc_debug = ath_debug;
#endif

        /*
         * Check if the MAC has multi-rate retry support.
         * We do this by trying to setup a fake extended
         * descriptor.  MAC's that don't have support will
         * return false w/o doing anything.  MAC's that do
         * support it will return true w/o doing anything.
         */
        sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

        /*
         * Check if the device has hardware counters for PHY
         * errors.  If so we need to enable the MIB interrupt
         * so we can act on stat triggers.
         */
        if (ath_hal_hwphycounters(ah))
                sc->sc_needmib = 1;

        /*
         * Get the hardware key cache size.
         */
        sc->sc_keymax = ath_hal_keycachesize(ah);
        if (sc->sc_keymax > ATH_KEYMAX) {
                if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
                        ATH_KEYMAX, sc->sc_keymax);
                sc->sc_keymax = ATH_KEYMAX;
        }
        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < sc->sc_keymax; i++)
                ath_hal_keyreset(ah, i);

        /*
         * Collect the default channel list.
         */
        error = ath_getchannels(sc);
        if (error != 0)
                goto bad;

        /*
         * Setup rate tables for all potential media types.
         */
        ath_rate_setup(sc, IEEE80211_MODE_11A);
        ath_rate_setup(sc, IEEE80211_MODE_11B);
        ath_rate_setup(sc, IEEE80211_MODE_11G);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
        ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_11NA);
        ath_rate_setup(sc, IEEE80211_MODE_11NG);
        ath_rate_setup(sc, IEEE80211_MODE_HALF);
        ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

        /* NB: setup here so ath_rate_update is happy */
        ath_setcurmode(sc, IEEE80211_MODE_11A);

        /*
         * Allocate tx+rx descriptors and populate the lists.
         */
        error = ath_desc_alloc(sc);
        if (error != 0) {
                if_printf(ifp, "failed to allocate descriptors: %d\n", error);
                goto bad;
        }
        callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

        ATH_TXBUF_LOCK_INIT(sc);

        sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
                taskqueue_thread_enqueue, &sc->sc_tq);
        taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
                "%s taskq", ifp->if_xname);

        TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
        TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
        TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
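        /*
         * NB: all of the driver's deferred work (rx, beacon miss,
         * beacon stuck, tx completion and DFS processing) is queued
         * to this single-threaded taskqueue, so the handlers are
         * serialised with respect to each other.
         */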
        /*
         * Allocate hardware transmit queues: one queue for
         * beacon frames and one data queue for each QoS
         * priority.  Note that the hal handles resetting
         * these queues at the needed time.
         *
         * XXX PS-Poll
         */
        sc->sc_bhalq = ath_beaconq_setup(ah);
        if (sc->sc_bhalq == (u_int) -1) {
                if_printf(ifp, "unable to setup a beacon xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
        if (sc->sc_cabq == NULL) {
                if_printf(ifp, "unable to setup CAB xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        /* NB: insure BK queue is the lowest priority h/w queue */
        if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
                if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
                        ieee80211_wme_acnames[WME_AC_BK]);
                error = EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
            !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
            !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
                /*
                 * Not enough hardware tx queues to properly do WME;
                 * just punt and assign them all to the same h/w queue.
                 * We could do a better job of this if, for example,
                 * we allocate queues when we switch from station to
                 * AP mode.
                 */
                if (sc->sc_ac2q[WME_AC_VI] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
                if (sc->sc_ac2q[WME_AC_BE] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
                sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
        }

        /*
         * Special case certain configurations.  Note the
         * CAB queue is handled by these specially so don't
         * include them when checking the txq setup mask.
         */
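        /*
         * sc_txqsetup has one bit set for each hardware tx queue
         * allocated above: 0x01 means WME was collapsed onto the
         * single queue q0, 0x0f means all four AC queues exist.
         * Pick the matching tx completion handler so only queues
         * that were actually set up get scanned.
         */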
        switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
        case 0x01:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
                break;
        case 0x0f:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
                break;
        default:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
                break;
        }

        /*
         * Setup rate control.  Some rate control modules
         * call back to change the antenna state so expose
         * the necessary entry points.
         * XXX maybe belongs in struct ath_ratectrl?
         */
        sc->sc_setdefantenna = ath_setdefantenna;
        sc->sc_rc = ath_rate_attach(sc);
        if (sc->sc_rc == NULL) {
                error = EIO;
                goto bad2;
        }

        /* Attach DFS module */
        if (! ath_dfs_attach(sc)) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach DFS\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Start DFS processing tasklet */
        TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

        /* Configure LED state */
        sc->sc_blinking = 0;
        sc->sc_ledstate = 1;
        sc->sc_ledon = 0;                       /* low true */
        sc->sc_ledidle = (2700*hz)/1000;        /* 2.7sec */
        callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

        /*
         * Don't setup hardware-based blinking.
         *
         * Although some NICs may have this configured in the
         * default reset register values, the user may wish
         * to alter which pins have which function.
         *
         * The reference driver attaches the MAC network LED to GPIO1 and
         * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
         * NIC has these reversed.
         */
        sc->sc_hardled = (1 == 0);
        sc->sc_led_net_pin = -1;
        sc->sc_led_pwr_pin = -1;
        /*
         * Auto-enable soft led processing for IBM cards and for
         * 5211 minipci cards.  Users can also manually enable/disable
         * support with a sysctl.
         */
        sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
        ath_led_config(sc);
        ath_hal_setledstate(ah, HAL_LED_INIT);

        ifp->if_softc = sc;
        ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
        ifp->if_start = ath_start;
        ifp->if_ioctl = ath_ioctl;
        ifp->if_init = ath_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_READY(&ifp->if_snd);

        ic->ic_ifp = ifp;
        /* XXX not right but it's not used anywhere important */
        ic->ic_phytype = IEEE80211_T_OFDM;
        ic->ic_opmode = IEEE80211_M_STA;
        ic->ic_caps =
                  IEEE80211_C_STA               /* station mode */
                | IEEE80211_C_IBSS              /* ibss, nee adhoc, mode */
                | IEEE80211_C_HOSTAP            /* hostap mode */
                | IEEE80211_C_MONITOR           /* monitor mode */
                | IEEE80211_C_AHDEMO            /* adhoc demo mode */
                | IEEE80211_C_WDS               /* 4-address traffic works */
                | IEEE80211_C_MBSS              /* mesh point link mode */
                | IEEE80211_C_SHPREAMBLE        /* short preamble supported */
                | IEEE80211_C_SHSLOT            /* short slot time supported */
                | IEEE80211_C_WPA               /* capable of WPA1+WPA2 */
                | IEEE80211_C_BGSCAN            /* capable of bg scanning */
                | IEEE80211_C_TXFRAG            /* handle tx frags */
#ifdef ATH_ENABLE_DFS
                | IEEE80211_C_DFS               /* Enable radar detection */
#endif
                ;
        /*
         * Query the hal to figure out h/w crypto support.
         */
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
                /*
                 * Check if h/w does the MIC and/or whether the
                 * separate key cache entries are required to
                 * handle both tx+rx MIC keys.
                 */
                if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
                        ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
                /*
                 * If the h/w supports storing tx+rx MIC keys
                 * in one cache slot automatically enable use.
                 */
                if (ath_hal_hastkipsplit(ah) ||
                    !ath_hal_settkipsplit(ah, AH_FALSE))
                        sc->sc_splitmic = 1;
                /*
                 * If the h/w can do TKIP MIC together with WME then
                 * we use it; otherwise we force the MIC to be done
                 * in software by the net80211 layer.
                 */
                if (ath_hal_haswmetkipmic(ah))
                        sc->sc_wmetkipmic = 1;
        }
        sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
        /*
         * Check for multicast key search support.
         */
        if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
            !ath_hal_getmcastkeysearch(sc->sc_ah)) {
                ath_hal_setmcastkeysearch(sc->sc_ah, 1);
        }
        sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
        /*
         * Mark key cache slots associated with global keys
         * as in use.  If we knew TKIP was not to be used we
         * could leave the +32, +64, and +32+64 slots free.
         */
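        /*
         * NB: slot i+64 holds the companion TKIP MIC key for index i;
         * when sc_splitmic is set the hardware also needs the i+32 and
         * i+32+64 entries so the tx and rx MIC keys can live in
         * separate key cache slots.
         */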
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                setbit(sc->sc_keymap, i);
                setbit(sc->sc_keymap, i+64);
                if (sc->sc_splitmic) {
                        setbit(sc->sc_keymap, i+32);
                        setbit(sc->sc_keymap, i+32+64);
                }
        }
        /*
         * TPC support can be done either with a global cap or
         * per-packet support.  The latter is not available on
         * all parts.  We're a bit pedantic here as all parts
         * support a global cap.
         */
        if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
                ic->ic_caps |= IEEE80211_C_TXPMGT;

        /*
         * Mark WME capability only if we have sufficient
         * hardware queues to do proper priority scheduling.
         */
        if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
                ic->ic_caps |= IEEE80211_C_WME;
        /*
         * Check for misc other capabilities.
         */
        if (ath_hal_hasbursting(ah))
                ic->ic_caps |= IEEE80211_C_BURST;
        sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
        sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
        sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
        sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
        sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
        if (ath_hal_hasfastframes(ah))
                ic->ic_caps |= IEEE80211_C_FF;
        wmodes = ath_hal_getwirelessmodes(ah);
        if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
                ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
        if (ath_hal_macversion(ah) > 0x78) {
                ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
                ic->ic_tdma_update = ath_tdma_update;
        }
#endif

        /*
         * Allow the TX and RX chainmasks to be overridden by
         * environment variables and/or device.hints.
         *
         * This must be done early - before the hardware is
         * calibrated or before the 802.11n stream calculation
         * is done.
         */
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "rx_chainmask",
            &rx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
                    rx_chainmask);
                (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
        }
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "tx_chainmask",
            &tx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
                    tx_chainmask);
                (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
        }
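        /*
         * For example, a device.hints/loader.conf entry such as
         * hint.ath.0.rx_chainmask="0x3" (unit number assumed here)
         * would limit unit 0 to two RX chains before calibration and
         * the stream calculation run.
         */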
        /*
         * The if_ath 11n support is completely not ready for normal use.
         * Enabling this option will likely break anything and everything.
         * Don't think of doing that unless you know what you're doing.
         */

#ifdef ATH_ENABLE_11N
        /*
         * Query HT capabilities
         */
        if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
            (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
                int rxs, txs;

                device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
                ic->ic_htcaps = IEEE80211_HTC_HT        /* HT operation */
                            | IEEE80211_HTC_AMPDU       /* A-MPDU tx/rx */
                            | IEEE80211_HTC_AMSDU       /* A-MSDU tx/rx */
                            | IEEE80211_HTCAP_MAXAMSDU_3839
                                                        /* max A-MSDU length */
                            | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */

                /*
                 * Enable short-GI for HT20 only if the hardware
                 * advertises support.
                 * Notably, anything earlier than the AR9287 doesn't.
                 */
                if ((ath_hal_getcapability(ah,
                    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
                    (wmodes & HAL_MODE_HT20)) {
                        device_printf(sc->sc_dev,
                            "[HT] enabling short-GI in 20MHz mode\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
                }

                if (wmodes & HAL_MODE_HT40)
                        ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
                            | IEEE80211_HTCAP_SHORTGI40;

                /*
                 * TX/RX streams need to be taken into account when
                 * negotiating which MCS rates can be received and
                 * which MCS rates are available for TX.
                 */
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

                ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
                ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

                ic->ic_txstream = txs;
                ic->ic_rxstream = rxs;

                device_printf(sc->sc_dev,
                    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
        }
#endif
        /*
         * Check if the hardware requires PCI register serialisation.
         * Some of the Owl based MACs require this.
         */
        if (mp_ncpus > 1 &&
            ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
             0, NULL) == HAL_OK) {
                sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
                device_printf(sc->sc_dev,
                    "Enabling register serialisation\n");
        }

        /*
         * Indicate we need the 802.11 header padded to a
         * 32-bit boundary for 4-address and QoS frames.
         */
        ic->ic_flags |= IEEE80211_F_DATAPAD;

        /*
         * Query the hal about antenna support.
         */
        sc->sc_defant = ath_hal_getdefantenna(ah);

        /*
         * Not all chips have the VEOL support we want to
         * use with IBSS beacons; check here for it.
         */
        sc->sc_hasveol = ath_hal_hasveol(ah);

        /* get mac address from hardware */
        ath_hal_getmac(ah, macaddr);
        if (sc->sc_hasbmask)
                ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

        /* NB: used to size node table key mapping array */
        ic->ic_max_keyix = sc->sc_keymax;
        /* call MI attach routine. */
        ieee80211_ifattach(ic, macaddr);
        ic->ic_setregdomain = ath_setregdomain;
        ic->ic_getradiocaps = ath_getradiocaps;
        sc->sc_opmode = HAL_M_STA;

        /* override default methods */
        ic->ic_newassoc = ath_newassoc;
        ic->ic_updateslot = ath_updateslot;
        ic->ic_wme.wme_update = ath_wme_update;
        ic->ic_vap_create = ath_vap_create;
        ic->ic_vap_delete = ath_vap_delete;
        ic->ic_raw_xmit = ath_raw_xmit;
        ic->ic_update_mcast = ath_update_mcast;
        ic->ic_update_promisc = ath_update_promisc;
        ic->ic_node_alloc = ath_node_alloc;
        sc->sc_node_free = ic->ic_node_free;
        ic->ic_node_free = ath_node_free;
        sc->sc_node_cleanup = ic->ic_node_cleanup;
        ic->ic_node_cleanup = ath_node_cleanup;
        ic->ic_node_getsignal = ath_node_getsignal;
        ic->ic_scan_start = ath_scan_start;
        ic->ic_scan_end = ath_scan_end;
        ic->ic_set_channel = ath_set_channel;

        /* 802.11n specific - but just override anyway */
        sc->sc_addba_request = ic->ic_addba_request;
        sc->sc_addba_response = ic->ic_addba_response;
        sc->sc_addba_stop = ic->ic_addba_stop;
        sc->sc_bar_response = ic->ic_bar_response;
        sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

        ic->ic_addba_request = ath_addba_request;
        ic->ic_addba_response = ath_addba_response;
        ic->ic_addba_response_timeout = ath_addba_response_timeout;
        ic->ic_addba_stop = ath_addba_stop;
        ic->ic_bar_response = ath_bar_response;
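        /*
         * NB: the original net80211 A-MPDU methods were stashed in the
         * softc above so these ath_addba_* and ath_bar_response wrappers
         * can update the driver's per-TID aggregation state and then
         * chain to them.
         */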
        ieee80211_radiotap_attach(ic,
            &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
                ATH_TX_RADIOTAP_PRESENT,
            &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
                ATH_RX_RADIOTAP_PRESENT);

        /*
         * Setup dynamic sysctl's now that country code and
         * regdomain are available from the hal.
         */
        ath_sysctlattach(sc);
        ath_sysctl_stats_attach(sc);
        ath_sysctl_hal_attach(sc);

        if (bootverbose)
                ieee80211_announce(ic);
        ath_announce(sc);
        return 0;
bad2:
        ath_tx_cleanup(sc);
        ath_desc_free(sc);
bad:
        if (ah)
                ath_hal_detach(ah);
        if (ifp != NULL)
                if_free(ifp);
        sc->sc_invalid = 1;
        return error;
}

int
ath_detach(struct ath_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
                __func__, ifp->if_flags);

        /*
         * NB: the order of these is important:
         * o stop the chip so no more interrupts will fire
         * o call the 802.11 layer before detaching the hal to
         *   insure callbacks into the driver to delete global
         *   key cache entries can be handled
         * o free the taskqueue which drains any pending tasks
         * o reclaim the tx queue data structures after calling
         *   the 802.11 layer as we'll get called back to reclaim
         *   node state and potentially want to use them
         * o to cleanup the tx queues the hal is called, so detach
         *   it last
         * Other than that, it's straightforward...
         */
        ath_stop(ifp);
        ieee80211_ifdetach(ifp->if_l2com);
        taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
        if (sc->sc_tx99 != NULL)
                sc->sc_tx99->detach(sc->sc_tx99);
#endif
        ath_rate_detach(sc->sc_rc);

        ath_dfs_detach(sc);
        ath_desc_free(sc);
        ath_tx_cleanup(sc);
        ath_hal_detach(sc->sc_ah);      /* NB: sets chip in full sleep */
        if_free(ifp);

        return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
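/*
 * For illustration: the vap handed bssid index 3 below ends up with
 * mac[0] |= (3 << 2) | 0x2, i.e. the U/L bit set plus the index in bits
 * 2-4, and the bits used are cleared from sc_hwbssidmask so the hardware
 * still matches every active bssid.
 */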
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
        int i;

        if (clone && sc->sc_hasbmask) {
                /* NB: we only do this if h/w supports multiple bssid */
                for (i = 0; i < 8; i++)
                        if ((sc->sc_bssidmask & (1<<i)) == 0)
                                break;
                if (i != 0)
                        mac[0] |= (i << 2)|0x2;
        } else
                i = 0;
        sc->sc_bssidmask |= 1<<i;
        sc->sc_hwbssidmask[0] &= ~mac[0];
        if (i == 0)
                sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
        int i = mac[0] >> 2;
        uint8_t mask;

        if (i != 0 || --sc->sc_nbssid0 == 0) {
                sc->sc_bssidmask &= ~(1<<i);
                /* recalculate bssid mask from remaining addresses */
                mask = 0xff;
                for (i = 1; i < 8; i++)
                        if (sc->sc_bssidmask & (1<<i))
                                mask &= ~((i<<2)|0x2);
                sc->sc_hwbssidmask[0] |= mask;
        }
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
        u_int slot, free;

        free = 0;
        for (slot = 0; slot < ATH_BCBUF; slot++)
                if (sc->sc_bslot[slot] == NULL) {
                        if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
                            sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
                                return slot;
                        free = slot;
                        /* NB: keep looking for a double slot */
                }
        return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
        struct ath_softc *sc = ic->ic_ifp->if_softc;
        struct ath_vap *avp;
        struct ieee80211vap *vap;
        uint8_t mac[IEEE80211_ADDR_LEN];
        int needbeacon, error;
        enum ieee80211_opmode ic_opmode;

        avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
            M_80211_VAP, M_WAITOK | M_ZERO);
        needbeacon = 0;
        IEEE80211_ADDR_COPY(mac, mac0);

        ATH_LOCK(sc);
        ic_opmode = opmode;             /* default to opmode of new vap */
        switch (opmode) {
        case IEEE80211_M_STA:
                if (sc->sc_nstavaps != 0) {     /* XXX only 1 for now */
                        device_printf(sc->sc_dev, "only 1 sta vap supported\n");
                        goto bad;
                }
                if (sc->sc_nvaps) {
                        /*
                         * With multiple vaps we must fall back
                         * to s/w beacon miss handling.
                         */
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
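                /*
                 * (The hardware beacon miss logic only watches the
                 * single BSS programmed into the chip, so with more
                 * than one vap beacon miss has to be detected in
                 * software.)
                 */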
                if (flags & IEEE80211_CLONE_NOBEACONS) {
                        /*
                         * Station mode w/o beacons is implemented w/ AP mode.
                         */
                        ic_opmode = IEEE80211_M_HOSTAP;
                }
                break;
        case IEEE80211_M_IBSS:
                if (sc->sc_nvaps != 0) {        /* XXX only 1 for now */
                        device_printf(sc->sc_dev,
                            "only 1 ibss vap supported\n");
                        goto bad;
                }
                needbeacon = 1;
                break;
        case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
                if (flags & IEEE80211_CLONE_TDMA) {
                        if (sc->sc_nvaps != 0) {
                                device_printf(sc->sc_dev,
                                    "only 1 tdma vap supported\n");
                                goto bad;
                        }
                        needbeacon = 1;
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
                /* fall thru... */
#endif
        case IEEE80211_M_MONITOR:
                if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
                        /*
                         * Adopt existing mode.  Adding a monitor or ahdemo
                         * vap to an existing configuration is of dubious
                         * value but should be ok.
                         */
                        /* XXX not right for monitor mode */
                        ic_opmode = ic->ic_opmode;
                }
                break;
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
                needbeacon = 1;
                break;
        case IEEE80211_M_WDS:
                if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
                        device_printf(sc->sc_dev,
                            "wds not supported in sta mode\n");
                        goto bad;
                }
                /*
                 * Silently remove any request for a unique
                 * bssid; WDS vap's always share the local
                 * mac address.
                 */
                flags &= ~IEEE80211_CLONE_BSSID;
                if (sc->sc_nvaps == 0)
                        ic_opmode = IEEE80211_M_HOSTAP;
                else
                        ic_opmode = ic->ic_opmode;
                break;
        default:
                device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
                goto bad;
        }
        /*
         * Check that a beacon buffer is available; the code below assumes it.
         */
        if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
                device_printf(sc->sc_dev, "no beacon buffer available\n");
                goto bad;
        }
        /* STA, AHDEMO? */
        if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
                assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
                ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
        }

        vap = &avp->av_vap;
        /* XXX can't hold mutex across if_alloc */
        ATH_UNLOCK(sc);
        error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
            bssid, mac);
        ATH_LOCK(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "%s: error %d creating vap\n",
                    __func__, error);
                goto bad2;
        }

        /* h/w crypto support */
        vap->iv_key_alloc = ath_key_alloc;
        vap->iv_key_delete = ath_key_delete;
        vap->iv_key_set = ath_key_set;
        vap->iv_key_update_begin = ath_key_update_begin;
        vap->iv_key_update_end = ath_key_update_end;

        /* override various methods */
        avp->av_recv_mgmt = vap->iv_recv_mgmt;
        vap->iv_recv_mgmt = ath_recv_mgmt;
        vap->iv_reset = ath_reset_vap;
        vap->iv_update_beacon = ath_beacon_update;
        avp->av_newstate = vap->iv_newstate;
        vap->iv_newstate = ath_newstate;
        avp->av_bmiss = vap->iv_bmiss;
        vap->iv_bmiss = ath_bmiss_vap;

        /* Set default parameters */

        /*
         * Anything earlier than some AR9300 series MACs doesn't
         * support a smaller MPDU density.
         */
        vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
        /*
         * All NICs can handle the maximum size, however
         * AR5416 based MACs can only TX aggregates w/ RTS
         * protection when the total aggregate size is <= 8k.
         * However, for now that's enforced by the TX path.
         */
        vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
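        /*
         * NB: MPDUDENSITY_8 advertises an 8 microsecond minimum MPDU
         * start spacing and MAXRXAMPDU_64K the full 65535 byte A-MPDU
         * size in our HT capabilities.
         */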
1097b032f27cSSam Leffler */ 1098b032f27cSSam Leffler avp->av_bslot = assign_bslot(sc); 1099b032f27cSSam Leffler KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1100b032f27cSSam Leffler ("beacon slot %u not empty", avp->av_bslot)); 1101b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = vap; 1102b032f27cSSam Leffler sc->sc_nbcnvaps++; 1103b032f27cSSam Leffler } 1104b032f27cSSam Leffler if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1105b032f27cSSam Leffler /* 1106b032f27cSSam Leffler * Multple vaps are to transmit beacons and we 1107b032f27cSSam Leffler * have h/w support for TSF adjusting; enable 1108b032f27cSSam Leffler * use of staggered beacons. 1109b032f27cSSam Leffler */ 1110b032f27cSSam Leffler sc->sc_stagbeacons = 1; 1111b032f27cSSam Leffler } 1112b032f27cSSam Leffler ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1113b032f27cSSam Leffler } 1114b032f27cSSam Leffler 1115b032f27cSSam Leffler ic->ic_opmode = ic_opmode; 1116b032f27cSSam Leffler if (opmode != IEEE80211_M_WDS) { 1117b032f27cSSam Leffler sc->sc_nvaps++; 1118b032f27cSSam Leffler if (opmode == IEEE80211_M_STA) 1119b032f27cSSam Leffler sc->sc_nstavaps++; 1120fe0dd789SSam Leffler if (opmode == IEEE80211_M_MBSS) 1121fe0dd789SSam Leffler sc->sc_nmeshvaps++; 1122b032f27cSSam Leffler } 1123b032f27cSSam Leffler switch (ic_opmode) { 1124b032f27cSSam Leffler case IEEE80211_M_IBSS: 1125b032f27cSSam Leffler sc->sc_opmode = HAL_M_IBSS; 1126b032f27cSSam Leffler break; 1127b032f27cSSam Leffler case IEEE80211_M_STA: 1128b032f27cSSam Leffler sc->sc_opmode = HAL_M_STA; 1129b032f27cSSam Leffler break; 1130b032f27cSSam Leffler case IEEE80211_M_AHDEMO: 1131584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 113210ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) { 113310ad9a77SSam Leffler sc->sc_tdma = 1; 113410ad9a77SSam Leffler /* NB: disable tsf adjust */ 113510ad9a77SSam Leffler sc->sc_stagbeacons = 0; 113610ad9a77SSam Leffler } 113710ad9a77SSam Leffler /* 113810ad9a77SSam Leffler * NB: adhoc demo mode is a pseudo mode; to the hal it's 113910ad9a77SSam Leffler * just ap mode. 114010ad9a77SSam Leffler */ 114110ad9a77SSam Leffler /* fall thru... */ 114210ad9a77SSam Leffler #endif 1143b032f27cSSam Leffler case IEEE80211_M_HOSTAP: 114459aa14a9SRui Paulo case IEEE80211_M_MBSS: 1145b032f27cSSam Leffler sc->sc_opmode = HAL_M_HOSTAP; 1146b032f27cSSam Leffler break; 1147b032f27cSSam Leffler case IEEE80211_M_MONITOR: 1148b032f27cSSam Leffler sc->sc_opmode = HAL_M_MONITOR; 1149b032f27cSSam Leffler break; 1150b032f27cSSam Leffler default: 1151b032f27cSSam Leffler /* XXX should not happen */ 1152b032f27cSSam Leffler break; 1153b032f27cSSam Leffler } 1154b032f27cSSam Leffler if (sc->sc_hastsfadd) { 1155b032f27cSSam Leffler /* 1156b032f27cSSam Leffler * Configure whether or not TSF adjust should be done. 1157b032f27cSSam Leffler */ 1158b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1159b032f27cSSam Leffler } 116010ad9a77SSam Leffler if (flags & IEEE80211_CLONE_NOBEACONS) { 116110ad9a77SSam Leffler /* 116210ad9a77SSam Leffler * Enable s/w beacon miss handling. 
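/*
 * With more than one beaconing vap and TSF-adjust support, the code above
 * enables staggered beacons: each vap gets a slot and beacons at an offset
 * within the beacon interval instead of every vap bursting at the same TBTT.
 * A rough model of the slot pick and a per-slot offset follows; NSLOTS and
 * the offset arithmetic are illustrative stand-ins, not values lifted from
 * the beacon code.
 */
#include <stdio.h>

#define NSLOTS		4		/* illustrative, stands in for ATH_BCBUF */

/* pick the first free slot, as the slot-assignment code does by scanning */
static int
pick_slot(const void *slots[NSLOTS])
{
	for (int i = 0; i < NSLOTS; i++)
		if (slots[i] == NULL)
			return i;
	return -1;
}

/* offset of a slot's target beacon time within one beacon interval, in TU */
static unsigned
stagger_offset(unsigned intval_tu, int slot)
{
	return (intval_tu * slot) / NSLOTS;
}

int
main(void)
{
	const void *slots[NSLOTS] = { "vap0", NULL, NULL, NULL };
	int s = pick_slot(slots);

	printf("new vap gets slot %d, beacons %u TU after the base TBTT\n",
	    s, stagger_offset(100, s));
	return 0;
}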
116310ad9a77SSam Leffler */ 116410ad9a77SSam Leffler sc->sc_swbmiss = 1; 116510ad9a77SSam Leffler } 1166b032f27cSSam Leffler ATH_UNLOCK(sc); 1167b032f27cSSam Leffler 1168b032f27cSSam Leffler /* complete setup */ 1169b032f27cSSam Leffler ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1170b032f27cSSam Leffler return vap; 1171b032f27cSSam Leffler bad2: 1172b032f27cSSam Leffler reclaim_address(sc, mac); 1173b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1174b032f27cSSam Leffler bad: 1175b032f27cSSam Leffler free(avp, M_80211_VAP); 1176b032f27cSSam Leffler ATH_UNLOCK(sc); 1177b032f27cSSam Leffler return NULL; 1178b032f27cSSam Leffler } 1179b032f27cSSam Leffler 1180b032f27cSSam Leffler static void 1181b032f27cSSam Leffler ath_vap_delete(struct ieee80211vap *vap) 1182b032f27cSSam Leffler { 1183b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 1184b032f27cSSam Leffler struct ifnet *ifp = ic->ic_ifp; 1185b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 1186b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 1187b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 1188b032f27cSSam Leffler 1189f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1190b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1191b032f27cSSam Leffler /* 1192b032f27cSSam Leffler * Quiesce the hardware while we remove the vap. In 1193b032f27cSSam Leffler * particular we need to reclaim all references to 1194b032f27cSSam Leffler * the vap state by any frames pending on the tx queues. 1195b032f27cSSam Leffler */ 1196b032f27cSSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 1197517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1198517526efSAdrian Chadd /* XXX Do all frames from all vaps/nodes need draining here? */ 11999a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* stop recv side */ 1200b032f27cSSam Leffler } 1201b032f27cSSam Leffler 1202b032f27cSSam Leffler ieee80211_vap_detach(vap); 120316d4de92SAdrian Chadd 120416d4de92SAdrian Chadd /* 120516d4de92SAdrian Chadd * XXX Danger Will Robinson! Danger! 120616d4de92SAdrian Chadd * 120716d4de92SAdrian Chadd * Because ieee80211_vap_detach() can queue a frame (the station 120816d4de92SAdrian Chadd * diassociate message?) after we've drained the TXQ and 120916d4de92SAdrian Chadd * flushed the software TXQ, we will end up with a frame queued 121016d4de92SAdrian Chadd * to a node whose vap is about to be freed. 121116d4de92SAdrian Chadd * 121216d4de92SAdrian Chadd * To work around this, flush the hardware/software again. 121316d4de92SAdrian Chadd * This may be racy - the ath task may be running and the packet 121416d4de92SAdrian Chadd * may be being scheduled between sw->hw txq. Tsk. 121516d4de92SAdrian Chadd * 121616d4de92SAdrian Chadd * TODO: figure out why a new node gets allocated somewhere around 121716d4de92SAdrian Chadd * here (after the ath_tx_swq() call; and after an ath_stop_locked() 121816d4de92SAdrian Chadd * call!) 121916d4de92SAdrian Chadd */ 122016d4de92SAdrian Chadd 122116d4de92SAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); 122216d4de92SAdrian Chadd 1223b032f27cSSam Leffler ATH_LOCK(sc); 1224b032f27cSSam Leffler /* 1225b032f27cSSam Leffler * Reclaim beacon state. Note this must be done before 1226b032f27cSSam Leffler * the vap instance is reclaimed as we may have a reference 1227b032f27cSSam Leffler * to it in the buffer for the beacon frame. 
1228b032f27cSSam Leffler */ 1229b032f27cSSam Leffler if (avp->av_bcbuf != NULL) { 1230b032f27cSSam Leffler if (avp->av_bslot != -1) { 1231b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = NULL; 1232b032f27cSSam Leffler sc->sc_nbcnvaps--; 1233b032f27cSSam Leffler } 1234b032f27cSSam Leffler ath_beacon_return(sc, avp->av_bcbuf); 1235b032f27cSSam Leffler avp->av_bcbuf = NULL; 1236b032f27cSSam Leffler if (sc->sc_nbcnvaps == 0) { 1237b032f27cSSam Leffler sc->sc_stagbeacons = 0; 1238b032f27cSSam Leffler if (sc->sc_hastsfadd) 1239b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, 0); 1240b032f27cSSam Leffler } 1241b032f27cSSam Leffler /* 1242b032f27cSSam Leffler * Reclaim any pending mcast frames for the vap. 1243b032f27cSSam Leffler */ 1244b032f27cSSam Leffler ath_tx_draintxq(sc, &avp->av_mcastq); 1245b032f27cSSam Leffler ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1246b032f27cSSam Leffler } 1247b032f27cSSam Leffler /* 1248b032f27cSSam Leffler * Update bookkeeping. 1249b032f27cSSam Leffler */ 1250b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_STA) { 1251b032f27cSSam Leffler sc->sc_nstavaps--; 1252b032f27cSSam Leffler if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1253b032f27cSSam Leffler sc->sc_swbmiss = 0; 125459aa14a9SRui Paulo } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 125559aa14a9SRui Paulo vap->iv_opmode == IEEE80211_M_MBSS) { 1256b032f27cSSam Leffler reclaim_address(sc, vap->iv_myaddr); 1257b032f27cSSam Leffler ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1258fe0dd789SSam Leffler if (vap->iv_opmode == IEEE80211_M_MBSS) 1259fe0dd789SSam Leffler sc->sc_nmeshvaps--; 1260b032f27cSSam Leffler } 1261b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_WDS) 1262b032f27cSSam Leffler sc->sc_nvaps--; 1263584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 126410ad9a77SSam Leffler /* TDMA operation ceases when the last vap is destroyed */ 126510ad9a77SSam Leffler if (sc->sc_tdma && sc->sc_nvaps == 0) { 126610ad9a77SSam Leffler sc->sc_tdma = 0; 126710ad9a77SSam Leffler sc->sc_swbmiss = 0; 126810ad9a77SSam Leffler } 126910ad9a77SSam Leffler #endif 1270b032f27cSSam Leffler free(avp, M_80211_VAP); 1271b032f27cSSam Leffler 1272b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1273b032f27cSSam Leffler /* 1274b032f27cSSam Leffler * Restart rx+tx machines if still running (RUNNING will 1275b032f27cSSam Leffler * be reset if we just destroyed the last vap). 
1276b032f27cSSam Leffler */ 1277b032f27cSSam Leffler if (ath_startrecv(sc) != 0) 1278b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 1279b032f27cSSam Leffler __func__); 1280c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 1281c89b957aSSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 1282c89b957aSSam Leffler if (sc->sc_tdma) 1283c89b957aSSam Leffler ath_tdma_config(sc, NULL); 1284c89b957aSSam Leffler else 1285c89b957aSSam Leffler #endif 1286b032f27cSSam Leffler ath_beacon_config(sc, NULL); 1287c89b957aSSam Leffler } 1288b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1289b032f27cSSam Leffler } 129016d4de92SAdrian Chadd ATH_UNLOCK(sc); 1291b032f27cSSam Leffler } 1292b032f27cSSam Leffler 12935591b213SSam Leffler void 12945591b213SSam Leffler ath_suspend(struct ath_softc *sc) 12955591b213SSam Leffler { 1296fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1297d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 12985591b213SSam Leffler 1299c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1300c42a7b7eSSam Leffler __func__, ifp->if_flags); 13015591b213SSam Leffler 1302d3ac945bSSam Leffler sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1303d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) 13045591b213SSam Leffler ath_stop(ifp); 1305d3ac945bSSam Leffler else 1306d3ac945bSSam Leffler ieee80211_suspend_all(ic); 1307d3ac945bSSam Leffler /* 1308d3ac945bSSam Leffler * NB: don't worry about putting the chip in low power 1309d3ac945bSSam Leffler * mode; pci will power off our socket on suspend and 1310f29b8b7fSWarner Losh * CardBus detaches the device. 1311d3ac945bSSam Leffler */ 1312d3ac945bSSam Leffler } 1313d3ac945bSSam Leffler 1314d3ac945bSSam Leffler /* 1315d3ac945bSSam Leffler * Reset the key cache since some parts do not reset the 1316d3ac945bSSam Leffler * contents on resume. First we clear all entries, then 1317d3ac945bSSam Leffler * re-load keys that the 802.11 layer assumes are setup 1318d3ac945bSSam Leffler * in h/w. 1319d3ac945bSSam Leffler */ 1320d3ac945bSSam Leffler static void 1321d3ac945bSSam Leffler ath_reset_keycache(struct ath_softc *sc) 1322d3ac945bSSam Leffler { 1323d3ac945bSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1324d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1325d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1326d3ac945bSSam Leffler int i; 1327d3ac945bSSam Leffler 1328d3ac945bSSam Leffler for (i = 0; i < sc->sc_keymax; i++) 1329d3ac945bSSam Leffler ath_hal_keyreset(ah, i); 1330d3ac945bSSam Leffler ieee80211_crypto_reload_keys(ic); 13315591b213SSam Leffler } 13325591b213SSam Leffler 13335591b213SSam Leffler void 13345591b213SSam Leffler ath_resume(struct ath_softc *sc) 13355591b213SSam Leffler { 1336fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1337d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1338d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1339d3ac945bSSam Leffler HAL_STATUS status; 13405591b213SSam Leffler 1341c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1342c42a7b7eSSam Leffler __func__, ifp->if_flags); 13435591b213SSam Leffler 1344d3ac945bSSam Leffler /* 1345d3ac945bSSam Leffler * Must reset the chip before we reload the 1346d3ac945bSSam Leffler * keycache as we were powered down on suspend. 1347d3ac945bSSam Leffler */ 1348054d7b69SSam Leffler ath_hal_reset(ah, sc->sc_opmode, 1349054d7b69SSam Leffler sc->sc_curchan != NULL ? 
sc->sc_curchan : ic->ic_curchan, 1350054d7b69SSam Leffler AH_FALSE, &status); 1351d3ac945bSSam Leffler ath_reset_keycache(sc); 13527e5eb44dSAdrian Chadd 13537e5eb44dSAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 13547e5eb44dSAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 13557e5eb44dSAdrian Chadd 1356a497cd88SAdrian Chadd /* Restore the LED configuration */ 1357a497cd88SAdrian Chadd ath_led_config(sc); 1358a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_INIT); 1359a497cd88SAdrian Chadd 1360d3ac945bSSam Leffler if (sc->sc_resume_up) { 1361d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) { 1362fc74a9f9SBrooks Davis ath_init(sc); 1363a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_RUN); 1364394f34a5SSam Leffler /* 1365394f34a5SSam Leffler * Program the beacon registers using the last rx'd 1366394f34a5SSam Leffler * beacon frame and enable sync on the next beacon 1367394f34a5SSam Leffler * we see. This should handle the case where we 1368394f34a5SSam Leffler * wakeup and find the same AP and also the case where 1369394f34a5SSam Leffler * we wakeup and need to roam. For the latter we 1370394f34a5SSam Leffler * should get bmiss events that trigger a roam. 1371394f34a5SSam Leffler */ 1372394f34a5SSam Leffler ath_beacon_config(sc, NULL); 1373394f34a5SSam Leffler sc->sc_syncbeacon = 1; 1374d3ac945bSSam Leffler } else 1375d3ac945bSSam Leffler ieee80211_resume_all(ic); 13765591b213SSam Leffler } 13772fd9aabbSAdrian Chadd 13782fd9aabbSAdrian Chadd /* XXX beacons ? */ 13796b59f5e3SSam Leffler } 13805591b213SSam Leffler 13815591b213SSam Leffler void 13825591b213SSam Leffler ath_shutdown(struct ath_softc *sc) 13835591b213SSam Leffler { 1384fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 13855591b213SSam Leffler 1386c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1387c42a7b7eSSam Leffler __func__, ifp->if_flags); 13885591b213SSam Leffler 13895591b213SSam Leffler ath_stop(ifp); 1390d3ac945bSSam Leffler /* NB: no point powering down chip as we're about to reboot */ 13915591b213SSam Leffler } 13925591b213SSam Leffler 1393c42a7b7eSSam Leffler /* 1394c42a7b7eSSam Leffler * Interrupt handler. Most of the actual processing is deferred. 1395c42a7b7eSSam Leffler */ 13965591b213SSam Leffler void 13975591b213SSam Leffler ath_intr(void *arg) 13985591b213SSam Leffler { 13995591b213SSam Leffler struct ath_softc *sc = arg; 1400fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 14015591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 14026f5fe81eSAdrian Chadd HAL_INT status = 0; 14038f939e79SAdrian Chadd uint32_t txqs; 14045591b213SSam Leffler 1405ef27340cSAdrian Chadd /* 1406ef27340cSAdrian Chadd * If we're inside a reset path, just print a warning and 1407ef27340cSAdrian Chadd * clear the ISR. The reset routine will finish it for us. 
1408ef27340cSAdrian Chadd */ 1409ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1410ef27340cSAdrian Chadd if (sc->sc_inreset_cnt) { 1411ef27340cSAdrian Chadd HAL_INT status; 1412ef27340cSAdrian Chadd ath_hal_getisr(ah, &status); /* clear ISR */ 1413ef27340cSAdrian Chadd ath_hal_intrset(ah, 0); /* disable further intr's */ 1414ef27340cSAdrian Chadd DPRINTF(sc, ATH_DEBUG_ANY, 1415ef27340cSAdrian Chadd "%s: in reset, ignoring: status=0x%x\n", 1416ef27340cSAdrian Chadd __func__, status); 1417ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1418ef27340cSAdrian Chadd return; 1419ef27340cSAdrian Chadd } 1420ef27340cSAdrian Chadd 14215591b213SSam Leffler if (sc->sc_invalid) { 14225591b213SSam Leffler /* 1423b58b3803SSam Leffler * The hardware is not ready/present, don't touch anything. 1424b58b3803SSam Leffler * Note this can happen early on if the IRQ is shared. 14255591b213SSam Leffler */ 1426c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1427ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14285591b213SSam Leffler return; 14295591b213SSam Leffler } 1430ef27340cSAdrian Chadd if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1431ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1432fdd758d4SSam Leffler return; 1433ef27340cSAdrian Chadd } 1434ef27340cSAdrian Chadd 143568e8e04eSSam Leffler if ((ifp->if_flags & IFF_UP) == 0 || 143668e8e04eSSam Leffler (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 143768e8e04eSSam Leffler HAL_INT status; 143868e8e04eSSam Leffler 1439c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1440c42a7b7eSSam Leffler __func__, ifp->if_flags); 14415591b213SSam Leffler ath_hal_getisr(ah, &status); /* clear ISR */ 14425591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable further intr's */ 1443ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14445591b213SSam Leffler return; 14455591b213SSam Leffler } 1446ef27340cSAdrian Chadd 1447c42a7b7eSSam Leffler /* 1448c42a7b7eSSam Leffler * Figure out the reason(s) for the interrupt. Note 1449c42a7b7eSSam Leffler * that the hal returns a pseudo-ISR that may include 1450c42a7b7eSSam Leffler * bits we haven't explicitly enabled so we mask the 1451c42a7b7eSSam Leffler * value to insure we only process bits we requested. 1452c42a7b7eSSam Leffler */ 14535591b213SSam Leffler ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1454c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1455f52d3452SAdrian Chadd CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 145631fdf3d6SAdrian Chadd #ifdef ATH_KTR_INTR_DEBUG 1457f52d3452SAdrian Chadd CTR5(ATH_KTR_INTR, 1458f52d3452SAdrian Chadd "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1459f52d3452SAdrian Chadd ah->ah_intrstate[0], 1460f52d3452SAdrian Chadd ah->ah_intrstate[1], 1461f52d3452SAdrian Chadd ah->ah_intrstate[2], 1462f52d3452SAdrian Chadd ah->ah_intrstate[3], 1463f52d3452SAdrian Chadd ah->ah_intrstate[6]); 146431fdf3d6SAdrian Chadd #endif 1465ecddff40SSam Leffler status &= sc->sc_imask; /* discard unasked for bits */ 14666f5fe81eSAdrian Chadd 14676f5fe81eSAdrian Chadd /* Short-circuit un-handled interrupts */ 1468ef27340cSAdrian Chadd if (status == 0x0) { 1469ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14706f5fe81eSAdrian Chadd return; 1471ef27340cSAdrian Chadd } 14726f5fe81eSAdrian Chadd 1473ef27340cSAdrian Chadd /* 1474ef27340cSAdrian Chadd * Take a note that we're inside the interrupt handler, so 1475ef27340cSAdrian Chadd * the reset routines know to wait. 
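/*
 * sc_inreset_cnt and sc_intr_cnt form a small hand-rolled handshake: the
 * interrupt handler bails out (after acking the chip) when a reset is in
 * flight, and otherwise counts itself in so the reset path can wait for it
 * to drain.  A stand-alone model of that handshake using POSIX threads in
 * place of the kernel's PCU mutex; the structure is the point, not the
 * primitives.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t pcu_mtx = PTHREAD_MUTEX_INITIALIZER;
static int inreset_cnt;		/* resets in flight */
static int intr_cnt;		/* interrupt handlers in flight */

/* what the interrupt handler does around its real work */
static void
intr_handler(void)
{
	pthread_mutex_lock(&pcu_mtx);
	if (inreset_cnt) {
		/* ack + drop the interrupt; the reset path will clean up */
		pthread_mutex_unlock(&pcu_mtx);
		return;
	}
	intr_cnt++;			/* tell the reset path to wait for us */
	pthread_mutex_unlock(&pcu_mtx);

	printf("servicing interrupt\n");	/* real work happens unlocked */

	pthread_mutex_lock(&pcu_mtx);
	intr_cnt--;
	pthread_mutex_unlock(&pcu_mtx);
}

/* what the reset/drain path does, much simplified */
static void
reset(void)
{
	pthread_mutex_lock(&pcu_mtx);
	inreset_cnt++;			/* new interrupts now bail out */
	while (intr_cnt != 0) {		/* wait out handlers already running */
		pthread_mutex_unlock(&pcu_mtx);
		sched_yield();
		pthread_mutex_lock(&pcu_mtx);
	}
	pthread_mutex_unlock(&pcu_mtx);

	printf("hardware reset done\n");

	pthread_mutex_lock(&pcu_mtx);
	inreset_cnt--;
	pthread_mutex_unlock(&pcu_mtx);
}

int
main(void)
{
	intr_handler();
	reset();
	intr_handler();
	return 0;
}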
1476ef27340cSAdrian Chadd */ 1477ef27340cSAdrian Chadd sc->sc_intr_cnt++; 1478ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1479ef27340cSAdrian Chadd 1480ef27340cSAdrian Chadd /* 1481ef27340cSAdrian Chadd * Handle the interrupt. We won't run concurrent with the reset 1482ef27340cSAdrian Chadd * or channel change routines as they'll wait for sc_intr_cnt 1483ef27340cSAdrian Chadd * to be 0 before continuing. 1484ef27340cSAdrian Chadd */ 14855591b213SSam Leffler if (status & HAL_INT_FATAL) { 14865591b213SSam Leffler sc->sc_stats.ast_hardware++; 14875591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable intr's until reset */ 148816c8acaaSSam Leffler ath_fatal_proc(sc, 0); 14895591b213SSam Leffler } else { 1490c42a7b7eSSam Leffler if (status & HAL_INT_SWBA) { 1491c42a7b7eSSam Leffler /* 1492c42a7b7eSSam Leffler * Software beacon alert--time to send a beacon. 1493c42a7b7eSSam Leffler * Handle beacon transmission directly; deferring 1494c42a7b7eSSam Leffler * this is too slow to meet timing constraints 1495c42a7b7eSSam Leffler * under load. 1496c42a7b7eSSam Leffler */ 1497584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 149810ad9a77SSam Leffler if (sc->sc_tdma) { 149910ad9a77SSam Leffler if (sc->sc_tdmaswba == 0) { 150010ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 150110ad9a77SSam Leffler struct ieee80211vap *vap = 150210ad9a77SSam Leffler TAILQ_FIRST(&ic->ic_vaps); 150310ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); 150410ad9a77SSam Leffler sc->sc_tdmaswba = 150510ad9a77SSam Leffler vap->iv_tdma->tdma_bintval; 150610ad9a77SSam Leffler } else 150710ad9a77SSam Leffler sc->sc_tdmaswba--; 150810ad9a77SSam Leffler } else 150910ad9a77SSam Leffler #endif 1510339ccfb3SSam Leffler { 1511c42a7b7eSSam Leffler ath_beacon_proc(sc, 0); 1512339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 1513339ccfb3SSam Leffler /* 1514339ccfb3SSam Leffler * Schedule the rx taskq in case there's no 1515339ccfb3SSam Leffler * traffic so any frames held on the staging 1516339ccfb3SSam Leffler * queue are aged and potentially flushed. 1517339ccfb3SSam Leffler */ 1518339ccfb3SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1519339ccfb3SSam Leffler #endif 1520339ccfb3SSam Leffler } 1521c42a7b7eSSam Leffler } 15225591b213SSam Leffler if (status & HAL_INT_RXEOL) { 15238f939e79SAdrian Chadd int imask; 1524f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1525ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 15265591b213SSam Leffler /* 15275591b213SSam Leffler * NB: the hardware should re-read the link when 15285591b213SSam Leffler * RXE bit is written, but it doesn't work at 15295591b213SSam Leffler * least on older hardware revs. 15305591b213SSam Leffler */ 15315591b213SSam Leffler sc->sc_stats.ast_rxeol++; 153273f895fcSAdrian Chadd /* 153373f895fcSAdrian Chadd * Disable RXEOL/RXORN - prevent an interrupt 153473f895fcSAdrian Chadd * storm until the PCU logic can be reset. 15351fdadc0fSAdrian Chadd * In case the interface is reset some other 15361fdadc0fSAdrian Chadd * way before "sc_kickpcu" is called, don't 15371fdadc0fSAdrian Chadd * modify sc_imask - that way if it is reset 15381fdadc0fSAdrian Chadd * by a call to ath_reset() somehow, the 15391fdadc0fSAdrian Chadd * interrupt mask will be correctly reprogrammed. 
154073f895fcSAdrian Chadd */ 15418f939e79SAdrian Chadd imask = sc->sc_imask; 15421fdadc0fSAdrian Chadd imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 15431fdadc0fSAdrian Chadd ath_hal_intrset(ah, imask); 15441fdadc0fSAdrian Chadd /* 15458f939e79SAdrian Chadd * Only blank sc_rxlink if we've not yet kicked 15468f939e79SAdrian Chadd * the PCU. 15478f939e79SAdrian Chadd * 15488f939e79SAdrian Chadd * This isn't entirely correct - the correct solution 15498f939e79SAdrian Chadd * would be to have a PCU lock and engage that for 15508f939e79SAdrian Chadd * the duration of the PCU fiddling; which would include 15518f939e79SAdrian Chadd * running the RX process. Otherwise we could end up 15528f939e79SAdrian Chadd * messing up the RX descriptor chain and making the 15538f939e79SAdrian Chadd * RX desc list much shorter. 15548f939e79SAdrian Chadd */ 15558f939e79SAdrian Chadd if (! sc->sc_kickpcu) 15568f939e79SAdrian Chadd sc->sc_rxlink = NULL; 15578f939e79SAdrian Chadd sc->sc_kickpcu = 1; 15588f939e79SAdrian Chadd /* 15591fdadc0fSAdrian Chadd * Enqueue an RX proc, to handled whatever 15601fdadc0fSAdrian Chadd * is in the RX queue. 15611fdadc0fSAdrian Chadd * This will then kick the PCU. 15621fdadc0fSAdrian Chadd */ 15631fdadc0fSAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1564ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 15655591b213SSam Leffler } 15665591b213SSam Leffler if (status & HAL_INT_TXURN) { 15675591b213SSam Leffler sc->sc_stats.ast_txurn++; 15685591b213SSam Leffler /* bump tx trigger level */ 15695591b213SSam Leffler ath_hal_updatetxtriglevel(ah, AH_TRUE); 15705591b213SSam Leffler } 15718f939e79SAdrian Chadd if (status & HAL_INT_RX) { 15728f939e79SAdrian Chadd sc->sc_stats.ast_rx_intr++; 15730bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 15748f939e79SAdrian Chadd } 15758f939e79SAdrian Chadd if (status & HAL_INT_TX) { 15768f939e79SAdrian Chadd sc->sc_stats.ast_tx_intr++; 15778f939e79SAdrian Chadd /* 15788f939e79SAdrian Chadd * Grab all the currently set bits in the HAL txq bitmap 15798f939e79SAdrian Chadd * and blank them. This is the only place we should be 15808f939e79SAdrian Chadd * doing this. 15818f939e79SAdrian Chadd */ 1582ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 15838f939e79SAdrian Chadd txqs = 0xffffffff; 15848f939e79SAdrian Chadd ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 15858f939e79SAdrian Chadd sc->sc_txq_active |= txqs; 15860bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1587ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 15888f939e79SAdrian Chadd } 15895591b213SSam Leffler if (status & HAL_INT_BMISS) { 15905591b213SSam Leffler sc->sc_stats.ast_bmiss++; 15910bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 15925591b213SSam Leffler } 15936ad02dbaSAdrian Chadd if (status & HAL_INT_GTT) 15946ad02dbaSAdrian Chadd sc->sc_stats.ast_tx_timeout++; 15955594f5c0SAdrian Chadd if (status & HAL_INT_CST) 15965594f5c0SAdrian Chadd sc->sc_stats.ast_tx_cst++; 1597c42a7b7eSSam Leffler if (status & HAL_INT_MIB) { 1598c42a7b7eSSam Leffler sc->sc_stats.ast_mib++; 1599ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1600c42a7b7eSSam Leffler /* 1601c42a7b7eSSam Leffler * Disable interrupts until we service the MIB 1602c42a7b7eSSam Leffler * interrupt; otherwise it will continue to fire. 1603c42a7b7eSSam Leffler */ 1604c42a7b7eSSam Leffler ath_hal_intrset(ah, 0); 1605c42a7b7eSSam Leffler /* 1606c42a7b7eSSam Leffler * Let the hal handle the event. We assume it will 1607c42a7b7eSSam Leffler * clear whatever condition caused the interrupt. 
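/*
 * For HAL_INT_TX the handler only asks the HAL which queues completed, ORs
 * that bitmap into sc_txq_active under the PCU lock and defers the rest to
 * the TX task; as the comment above notes, gathering the bitmap anywhere
 * else could lose completions.  A minimal single-threaded model of the
 * accumulate-and-defer step (the names and the fake status value are mine,
 * and the real code protects the bitmap with a lock):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t txq_active;	/* bitmap of queues with completed frames */

/* stands in for the HAL call that returns and clears the TX queue status */
static uint32_t
hw_get_completed_txqs(void)
{
	return 0x00000005;	/* pretend queues 0 and 2 completed */
}

/* interrupt side: accumulate only, defer the real work */
static void
tx_intr(void)
{
	txq_active |= hw_get_completed_txqs();	/* OR, never assign */
	/* ...schedule the TX completion task here... */
}

/* task side: consume the accumulated bitmap */
static void
tx_task(void)
{
	uint32_t qs = txq_active;

	txq_active = 0;
	for (int q = 0; q < 32; q++)
		if (qs & (1u << q))
			printf("process completions on txq %d\n", q);
}

int
main(void)
{
	tx_intr();
	tx_task();
	return 0;
}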
1608c42a7b7eSSam Leffler */ 1609ffa2cab6SSam Leffler ath_hal_mibevent(ah, &sc->sc_halstats); 16108f939e79SAdrian Chadd /* 16118f939e79SAdrian Chadd * Don't reset the interrupt if we've just 16128f939e79SAdrian Chadd * kicked the PCU, or we may get a nested 16138f939e79SAdrian Chadd * RXEOL before the rxproc has had a chance 16148f939e79SAdrian Chadd * to run. 16158f939e79SAdrian Chadd */ 16168f939e79SAdrian Chadd if (sc->sc_kickpcu == 0) 1617c42a7b7eSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1618ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1619c42a7b7eSSam Leffler } 16209c4fc1e8SSam Leffler if (status & HAL_INT_RXORN) { 16219c4fc1e8SSam Leffler /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1622f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 16239c4fc1e8SSam Leffler sc->sc_stats.ast_rxorn++; 16249c4fc1e8SSam Leffler } 16255591b213SSam Leffler } 1626ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1627ef27340cSAdrian Chadd sc->sc_intr_cnt--; 1628ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16295591b213SSam Leffler } 16305591b213SSam Leffler 16315591b213SSam Leffler static void 16325591b213SSam Leffler ath_fatal_proc(void *arg, int pending) 16335591b213SSam Leffler { 16345591b213SSam Leffler struct ath_softc *sc = arg; 1635fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 163616c8acaaSSam Leffler u_int32_t *state; 163716c8acaaSSam Leffler u_int32_t len; 163868e8e04eSSam Leffler void *sp; 16395591b213SSam Leffler 1640c42a7b7eSSam Leffler if_printf(ifp, "hardware error; resetting\n"); 164116c8acaaSSam Leffler /* 164216c8acaaSSam Leffler * Fatal errors are unrecoverable. Typically these 164316c8acaaSSam Leffler * are caused by DMA errors. Collect h/w state from 164416c8acaaSSam Leffler * the hal so we can diagnose what's going on. 164516c8acaaSSam Leffler */ 164668e8e04eSSam Leffler if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 164716c8acaaSSam Leffler KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 164868e8e04eSSam Leffler state = sp; 164916c8acaaSSam Leffler if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 165016c8acaaSSam Leffler state[0], state[1] , state[2], state[3], 165116c8acaaSSam Leffler state[4], state[5]); 165216c8acaaSSam Leffler } 1653517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 16545591b213SSam Leffler } 16555591b213SSam Leffler 16565591b213SSam Leffler static void 1657b032f27cSSam Leffler ath_bmiss_vap(struct ieee80211vap *vap) 16585591b213SSam Leffler { 165959fbb257SSam Leffler /* 166059fbb257SSam Leffler * Workaround phantom bmiss interrupts by sanity-checking 166159fbb257SSam Leffler * the time of our last rx'd frame. If it is within the 166259fbb257SSam Leffler * beacon miss interval then ignore the interrupt. If it's 166359fbb257SSam Leffler * truly a bmiss we'll get another interrupt soon and that'll 166459fbb257SSam Leffler * be dispatched up for processing. Note this applies only 166559fbb257SSam Leffler * for h/w beacon miss events. 
166659fbb257SSam Leffler */ 166759fbb257SSam Leffler if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1668a7ace843SSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 1669a7ace843SSam Leffler struct ath_softc *sc = ifp->if_softc; 1670d7736e13SSam Leffler u_int64_t lastrx = sc->sc_lastrx; 1671d7736e13SSam Leffler u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 167280767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 1673d7736e13SSam Leffler u_int bmisstimeout = 1674b032f27cSSam Leffler vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1675d7736e13SSam Leffler 1676d7736e13SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 1677d7736e13SSam Leffler "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1678d7736e13SSam Leffler __func__, (unsigned long long) tsf, 1679d7736e13SSam Leffler (unsigned long long)(tsf - lastrx), 1680d7736e13SSam Leffler (unsigned long long) lastrx, bmisstimeout); 168159fbb257SSam Leffler 168259fbb257SSam Leffler if (tsf - lastrx <= bmisstimeout) { 1683d7736e13SSam Leffler sc->sc_stats.ast_bmiss_phantom++; 168459fbb257SSam Leffler return; 168559fbb257SSam Leffler } 168659fbb257SSam Leffler } 168759fbb257SSam Leffler ATH_VAP(vap)->av_bmiss(vap); 1688e585d188SSam Leffler } 1689b032f27cSSam Leffler 1690459bc4f0SSam Leffler static int 1691459bc4f0SSam Leffler ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1692459bc4f0SSam Leffler { 1693459bc4f0SSam Leffler uint32_t rsize; 1694459bc4f0SSam Leffler void *sp; 1695459bc4f0SSam Leffler 169625c96056SAdrian Chadd if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1697459bc4f0SSam Leffler return 0; 1698459bc4f0SSam Leffler KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1699459bc4f0SSam Leffler *hangs = *(uint32_t *)sp; 1700459bc4f0SSam Leffler return 1; 1701459bc4f0SSam Leffler } 1702459bc4f0SSam Leffler 1703b032f27cSSam Leffler static void 1704b032f27cSSam Leffler ath_bmiss_proc(void *arg, int pending) 1705b032f27cSSam Leffler { 1706b032f27cSSam Leffler struct ath_softc *sc = arg; 1707b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1708459bc4f0SSam Leffler uint32_t hangs; 1709b032f27cSSam Leffler 1710b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1711459bc4f0SSam Leffler 1712459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 17134fa8d4efSDaniel Eischen if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1714517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 1715459bc4f0SSam Leffler } else 1716b032f27cSSam Leffler ieee80211_beacon_miss(ifp->if_l2com); 17175591b213SSam Leffler } 17185591b213SSam Leffler 1719724c193aSSam Leffler /* 1720b032f27cSSam Leffler * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1721b032f27cSSam Leffler * calcs together with WME. If necessary disable the crypto 1722b032f27cSSam Leffler * hardware and mark the 802.11 state so keys will be setup 1723b032f27cSSam Leffler * with the MIC work done in software. 
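/*
 * The phantom-bmiss filter above works in TSF microseconds: the window is
 * iv_bmissthreshold beacon intervals of ni_intval time units, each 1024 us,
 * and the event is ignored when a frame was received more recently than
 * that.  The same arithmetic as a small worked example:
 */
#include <stdint.h>
#include <stdio.h>

/* returns nonzero if the beacon-miss interrupt should be ignored */
static int
bmiss_is_phantom(uint64_t tsf_now, uint64_t tsf_lastrx,
    unsigned bmissthreshold, unsigned ni_intval_tu)
{
	uint64_t timeout_us = (uint64_t)bmissthreshold * ni_intval_tu * 1024;

	return (tsf_now - tsf_lastrx) <= timeout_us;
}

int
main(void)
{
	/* 7 missed beacons allowed, 100 TU interval -> 716800 us window */
	printf("%d\n", bmiss_is_phantom(10000000, 9500000, 7, 100)); /* 1: ignore */
	printf("%d\n", bmiss_is_phantom(10000000, 9000000, 7, 100)); /* 0: real miss */
	return 0;
}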
1724b032f27cSSam Leffler */ 1725b032f27cSSam Leffler static void 1726b032f27cSSam Leffler ath_settkipmic(struct ath_softc *sc) 1727b032f27cSSam Leffler { 1728b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1729b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1730b032f27cSSam Leffler 1731b032f27cSSam Leffler if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1732b032f27cSSam Leffler if (ic->ic_flags & IEEE80211_F_WME) { 1733b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1734b032f27cSSam Leffler ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1735b032f27cSSam Leffler } else { 1736b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1737b032f27cSSam Leffler ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1738b032f27cSSam Leffler } 1739b032f27cSSam Leffler } 1740b032f27cSSam Leffler } 1741b032f27cSSam Leffler 17425591b213SSam Leffler static void 17435591b213SSam Leffler ath_init(void *arg) 17445591b213SSam Leffler { 17455591b213SSam Leffler struct ath_softc *sc = (struct ath_softc *) arg; 1746fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1747b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 17485591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 17495591b213SSam Leffler HAL_STATUS status; 17505591b213SSam Leffler 1751c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1752c42a7b7eSSam Leffler __func__, ifp->if_flags); 17535591b213SSam Leffler 1754f0b2a0beSSam Leffler ATH_LOCK(sc); 17555591b213SSam Leffler /* 17565591b213SSam Leffler * Stop anything previously setup. This is safe 17575591b213SSam Leffler * whether this is the first time through or not. 17585591b213SSam Leffler */ 1759c42a7b7eSSam Leffler ath_stop_locked(ifp); 17605591b213SSam Leffler 17615591b213SSam Leffler /* 17625591b213SSam Leffler * The basic interface to setting the hardware in a good 17635591b213SSam Leffler * state is ``reset''. On return the hardware is known to 17645591b213SSam Leffler * be powered up and with interrupts disabled. This must 17655591b213SSam Leffler * be followed by initialization of the appropriate bits 17665591b213SSam Leffler * and then setup of the interrupt mask. 17675591b213SSam Leffler */ 1768b032f27cSSam Leffler ath_settkipmic(sc); 176959efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 17705591b213SSam Leffler if_printf(ifp, "unable to reset hardware; hal status %u\n", 17715591b213SSam Leffler status); 1772b032f27cSSam Leffler ATH_UNLOCK(sc); 1773b032f27cSSam Leffler return; 17745591b213SSam Leffler } 1775b032f27cSSam Leffler ath_chan_change(sc, ic->ic_curchan); 17765591b213SSam Leffler 177748237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 177848237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 177948237774SAdrian Chadd 17805591b213SSam Leffler /* 1781c59005e9SSam Leffler * Likewise this is set during reset so update 1782c59005e9SSam Leffler * state cached in the driver. 
1783c59005e9SSam Leffler */ 1784c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 17852dc7fcc4SSam Leffler sc->sc_lastlongcal = 0; 17862dc7fcc4SSam Leffler sc->sc_resetcal = 1; 17872dc7fcc4SSam Leffler sc->sc_lastcalreset = 0; 1788a108ab63SAdrian Chadd sc->sc_lastani = 0; 1789a108ab63SAdrian Chadd sc->sc_lastshortcal = 0; 1790a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 17912fd9aabbSAdrian Chadd /* 17922fd9aabbSAdrian Chadd * Beacon timers were cleared here; give ath_newstate() 17932fd9aabbSAdrian Chadd * a hint that the beacon timers should be poked when 17942fd9aabbSAdrian Chadd * things transition to the RUN state. 17952fd9aabbSAdrian Chadd */ 17962fd9aabbSAdrian Chadd sc->sc_beacons = 0; 1797c42a7b7eSSam Leffler 1798c42a7b7eSSam Leffler /* 17998f939e79SAdrian Chadd * Initial aggregation settings. 18008f939e79SAdrian Chadd */ 18018f939e79SAdrian Chadd sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 18028f939e79SAdrian Chadd sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 18038f939e79SAdrian Chadd sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 18048f939e79SAdrian Chadd 18058f939e79SAdrian Chadd /* 18065591b213SSam Leffler * Setup the hardware after reset: the key cache 18075591b213SSam Leffler * is filled as needed and the receive engine is 18085591b213SSam Leffler * set going. Frame transmit is handled entirely 18095591b213SSam Leffler * in the frame output path; there's nothing to do 18105591b213SSam Leffler * here except setup the interrupt mask. 18115591b213SSam Leffler */ 18125591b213SSam Leffler if (ath_startrecv(sc) != 0) { 18135591b213SSam Leffler if_printf(ifp, "unable to start recv logic\n"); 1814b032f27cSSam Leffler ATH_UNLOCK(sc); 1815b032f27cSSam Leffler return; 18165591b213SSam Leffler } 18175591b213SSam Leffler 18185591b213SSam Leffler /* 18195591b213SSam Leffler * Enable interrupts. 18205591b213SSam Leffler */ 18215591b213SSam Leffler sc->sc_imask = HAL_INT_RX | HAL_INT_TX 18225591b213SSam Leffler | HAL_INT_RXEOL | HAL_INT_RXORN 18235591b213SSam Leffler | HAL_INT_FATAL | HAL_INT_GLOBAL; 1824c42a7b7eSSam Leffler /* 1825c42a7b7eSSam Leffler * Enable MIB interrupts when there are hardware phy counters. 1826c42a7b7eSSam Leffler * Note we only do this (at the moment) for station mode. 
1827c42a7b7eSSam Leffler */ 1828c42a7b7eSSam Leffler if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1829c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_MIB; 18305591b213SSam Leffler 18315594f5c0SAdrian Chadd /* Enable global TX timeout and carrier sense timeout if available */ 18326ad02dbaSAdrian Chadd if (ath_hal_gtxto_supported(ah)) 18333788ebedSAdrian Chadd sc->sc_imask |= HAL_INT_GTT; 1834d0a0ebc6SAdrian Chadd 1835d0a0ebc6SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1836d0a0ebc6SAdrian Chadd __func__, sc->sc_imask); 18376ad02dbaSAdrian Chadd 183813f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 18392e986da5SSam Leffler callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1840b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 18415591b213SSam Leffler 1842b032f27cSSam Leffler ATH_UNLOCK(sc); 1843b032f27cSSam Leffler 184486e07743SSam Leffler #ifdef ATH_TX99_DIAG 184586e07743SSam Leffler if (sc->sc_tx99 != NULL) 184686e07743SSam Leffler sc->sc_tx99->start(sc->sc_tx99); 184786e07743SSam Leffler else 184886e07743SSam Leffler #endif 1849b032f27cSSam Leffler ieee80211_start_all(ic); /* start all vap's */ 18505591b213SSam Leffler } 18515591b213SSam Leffler 18525591b213SSam Leffler static void 1853c42a7b7eSSam Leffler ath_stop_locked(struct ifnet *ifp) 18545591b213SSam Leffler { 18555591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 18565591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 18575591b213SSam Leffler 1858c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1859c42a7b7eSSam Leffler __func__, sc->sc_invalid, ifp->if_flags); 18605591b213SSam Leffler 1861c42a7b7eSSam Leffler ATH_LOCK_ASSERT(sc); 186213f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 18635591b213SSam Leffler /* 18645591b213SSam Leffler * Shutdown the hardware and driver: 1865c42a7b7eSSam Leffler * reset 802.11 state machine 18665591b213SSam Leffler * turn off timers 1867c42a7b7eSSam Leffler * disable interrupts 1868c42a7b7eSSam Leffler * turn off the radio 18695591b213SSam Leffler * clear transmit machinery 18705591b213SSam Leffler * clear receive machinery 18715591b213SSam Leffler * drain and release tx queues 18725591b213SSam Leffler * reclaim beacon resources 18735591b213SSam Leffler * power down hardware 18745591b213SSam Leffler * 18755591b213SSam Leffler * Note that some of this work is not possible if the 18765591b213SSam Leffler * hardware is gone (invalid). 
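/*
 * sc_imask is built from a base set plus optional bits (MIB only for
 * station mode with hardware phy counters, GTT only when the HAL supports
 * it), programmed via ath_hal_intrset(), and later used by the interrupt
 * handler to mask the raw ISR so only requested sources are processed.  An
 * outline of that composition with placeholder bit values, not the real
 * HAL_INT_* definitions:
 */
#include <stdint.h>
#include <stdio.h>

#define INT_RX		0x0001	/* placeholder bit values */
#define INT_TX		0x0002
#define INT_RXEOL	0x0004
#define INT_RXORN	0x0008
#define INT_FATAL	0x0010
#define INT_MIB		0x0020
#define INT_GTT		0x0040
#define INT_GLOBAL	0x8000

static uint32_t
build_imask(int is_sta, int needmib, int has_gtxto)
{
	uint32_t imask = INT_RX | INT_TX | INT_RXEOL | INT_RXORN |
	    INT_FATAL | INT_GLOBAL;

	if (needmib && is_sta)		/* phy error counters, sta mode only */
		imask |= INT_MIB;
	if (has_gtxto)			/* global TX timeout, if supported */
		imask |= INT_GTT;
	return imask;
}

int
main(void)
{
	uint32_t imask = build_imask(1, 1, 0);
	uint32_t isr = 0x0064;		/* RXEOL|MIB plus a GTT bit we did not ask for */

	printf("process bits 0x%x\n", isr & imask); /* discard unasked-for bits */
	return 0;
}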
18775591b213SSam Leffler 		 */
187886e07743SSam Leffler #ifdef ATH_TX99_DIAG
187986e07743SSam Leffler 		if (sc->sc_tx99 != NULL)
188086e07743SSam Leffler 			sc->sc_tx99->stop(sc->sc_tx99);
188186e07743SSam Leffler #endif
18822e986da5SSam Leffler 		callout_stop(&sc->sc_wd_ch);
18832e986da5SSam Leffler 		sc->sc_wd_timer = 0;
188413f4c340SRobert Watson 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1885c42a7b7eSSam Leffler 		if (!sc->sc_invalid) {
18863e50ec2cSSam Leffler 			if (sc->sc_softled) {
18873e50ec2cSSam Leffler 				callout_stop(&sc->sc_ledtimer);
18883e50ec2cSSam Leffler 				ath_hal_gpioset(ah, sc->sc_ledpin,
18893e50ec2cSSam Leffler 				    !sc->sc_ledon);
18903e50ec2cSSam Leffler 				sc->sc_blinking = 0;
18913e50ec2cSSam Leffler 			}
18925591b213SSam Leffler 			ath_hal_intrset(ah, 0);
1893c42a7b7eSSam Leffler 		}
1894517526efSAdrian Chadd 		ath_draintxq(sc, ATH_RESET_DEFAULT);
1895c42a7b7eSSam Leffler 		if (!sc->sc_invalid) {
18969a842e8bSAdrian Chadd 			ath_stoprecv(sc, 1);
1897c42a7b7eSSam Leffler 			ath_hal_phydisable(ah);
1898c42a7b7eSSam Leffler 		} else
18995591b213SSam Leffler 			sc->sc_rxlink = NULL;
1900b032f27cSSam Leffler 		ath_beacon_free(sc);	/* XXX not needed */
1901c42a7b7eSSam Leffler 	}
1902c42a7b7eSSam Leffler }
1903c42a7b7eSSam Leffler 
1904ef27340cSAdrian Chadd #define MAX_TXRX_ITERATIONS 1000
1905ef27340cSAdrian Chadd static void
190621008bf1SAdrian Chadd ath_txrx_stop_locked(struct ath_softc *sc)
1907ef27340cSAdrian Chadd {
1908ef27340cSAdrian Chadd 	int i = MAX_TXRX_ITERATIONS;
1909ef27340cSAdrian Chadd 
1910ef27340cSAdrian Chadd 	ATH_UNLOCK_ASSERT(sc);
191121008bf1SAdrian Chadd 	ATH_PCU_LOCK_ASSERT(sc);
191221008bf1SAdrian Chadd 
1913ef27340cSAdrian Chadd 	/* Stop any new TX/RX from occurring */
1914ef27340cSAdrian Chadd 	taskqueue_block(sc->sc_tq);
1915ef27340cSAdrian Chadd 
1916ef27340cSAdrian Chadd 	/*
1917ef27340cSAdrian Chadd 	 * Sleep until all the pending operations have completed.
1918ef27340cSAdrian Chadd 	 *
1919ef27340cSAdrian Chadd 	 * The caller must ensure that reset has been incremented
1921ef27340cSAdrian Chadd */ 1922ef27340cSAdrian Chadd while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1923ef27340cSAdrian Chadd sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1924ef27340cSAdrian Chadd if (i <= 0) 1925ef27340cSAdrian Chadd break; 1926a2d8240dSAdrian Chadd msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1927ef27340cSAdrian Chadd i--; 1928ef27340cSAdrian Chadd } 1929ef27340cSAdrian Chadd 1930ef27340cSAdrian Chadd if (i <= 0) 1931ef27340cSAdrian Chadd device_printf(sc->sc_dev, 1932ef27340cSAdrian Chadd "%s: didn't finish after %d iterations\n", 1933ef27340cSAdrian Chadd __func__, MAX_TXRX_ITERATIONS); 1934ef27340cSAdrian Chadd } 1935ef27340cSAdrian Chadd #undef MAX_TXRX_ITERATIONS 1936ef27340cSAdrian Chadd 1937e78719adSAdrian Chadd #if 0 1938ef27340cSAdrian Chadd static void 193921008bf1SAdrian Chadd ath_txrx_stop(struct ath_softc *sc) 194021008bf1SAdrian Chadd { 194121008bf1SAdrian Chadd ATH_UNLOCK_ASSERT(sc); 194221008bf1SAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 194321008bf1SAdrian Chadd 194421008bf1SAdrian Chadd ATH_PCU_LOCK(sc); 194521008bf1SAdrian Chadd ath_txrx_stop_locked(sc); 194621008bf1SAdrian Chadd ATH_PCU_UNLOCK(sc); 194721008bf1SAdrian Chadd } 1948e78719adSAdrian Chadd #endif 194921008bf1SAdrian Chadd 195021008bf1SAdrian Chadd static void 1951ef27340cSAdrian Chadd ath_txrx_start(struct ath_softc *sc) 1952ef27340cSAdrian Chadd { 1953ef27340cSAdrian Chadd 1954ef27340cSAdrian Chadd taskqueue_unblock(sc->sc_tq); 1955ef27340cSAdrian Chadd } 1956ef27340cSAdrian Chadd 1957ee321975SAdrian Chadd /* 1958ee321975SAdrian Chadd * Grab the reset lock, and wait around until noone else 1959ee321975SAdrian Chadd * is trying to do anything with it. 1960ee321975SAdrian Chadd * 1961ee321975SAdrian Chadd * This is totally horrible but we can't hold this lock for 1962ee321975SAdrian Chadd * long enough to do TX/RX or we end up with net80211/ip stack 1963ee321975SAdrian Chadd * LORs and eventual deadlock. 1964ee321975SAdrian Chadd * 1965ee321975SAdrian Chadd * "dowait" signals whether to spin, waiting for the reset 1966ee321975SAdrian Chadd * lock count to reach 0. This should (for now) only be used 1967ee321975SAdrian Chadd * during the reset path, as the rest of the code may not 1968ee321975SAdrian Chadd * be locking-reentrant enough to behave correctly. 1969ee321975SAdrian Chadd * 1970ee321975SAdrian Chadd * Another, cleaner way should be found to serialise all of 1971ee321975SAdrian Chadd * these operations. 
1972ee321975SAdrian Chadd */ 1973ee321975SAdrian Chadd #define MAX_RESET_ITERATIONS 10 1974ee321975SAdrian Chadd static int 1975ee321975SAdrian Chadd ath_reset_grablock(struct ath_softc *sc, int dowait) 1976ee321975SAdrian Chadd { 1977ee321975SAdrian Chadd int w = 0; 1978ee321975SAdrian Chadd int i = MAX_RESET_ITERATIONS; 1979ee321975SAdrian Chadd 1980ee321975SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 1981ee321975SAdrian Chadd do { 1982ee321975SAdrian Chadd if (sc->sc_inreset_cnt == 0) { 1983ee321975SAdrian Chadd w = 1; 1984ee321975SAdrian Chadd break; 1985ee321975SAdrian Chadd } 1986ee321975SAdrian Chadd if (dowait == 0) { 1987ee321975SAdrian Chadd w = 0; 1988ee321975SAdrian Chadd break; 1989ee321975SAdrian Chadd } 1990ee321975SAdrian Chadd ATH_PCU_UNLOCK(sc); 1991ee321975SAdrian Chadd pause("ath_reset_grablock", 1); 1992ee321975SAdrian Chadd i--; 1993ee321975SAdrian Chadd ATH_PCU_LOCK(sc); 1994ee321975SAdrian Chadd } while (i > 0); 1995ee321975SAdrian Chadd 1996ee321975SAdrian Chadd /* 1997ee321975SAdrian Chadd * We always increment the refcounter, regardless 1998ee321975SAdrian Chadd * of whether we succeeded to get it in an exclusive 1999ee321975SAdrian Chadd * way. 2000ee321975SAdrian Chadd */ 2001ee321975SAdrian Chadd sc->sc_inreset_cnt++; 2002ee321975SAdrian Chadd 2003ee321975SAdrian Chadd if (i <= 0) 2004ee321975SAdrian Chadd device_printf(sc->sc_dev, 2005ee321975SAdrian Chadd "%s: didn't finish after %d iterations\n", 2006ee321975SAdrian Chadd __func__, MAX_RESET_ITERATIONS); 2007ee321975SAdrian Chadd 2008ee321975SAdrian Chadd if (w == 0) 2009ee321975SAdrian Chadd device_printf(sc->sc_dev, 2010ee321975SAdrian Chadd "%s: warning, recursive reset path!\n", 2011ee321975SAdrian Chadd __func__); 2012ee321975SAdrian Chadd 2013ee321975SAdrian Chadd return w; 2014ee321975SAdrian Chadd } 2015ee321975SAdrian Chadd #undef MAX_RESET_ITERATIONS 2016ee321975SAdrian Chadd 2017ee321975SAdrian Chadd /* 2018ee321975SAdrian Chadd * XXX TODO: write ath_reset_releaselock 2019ee321975SAdrian Chadd */ 2020ee321975SAdrian Chadd 2021c42a7b7eSSam Leffler static void 2022c42a7b7eSSam Leffler ath_stop(struct ifnet *ifp) 2023c42a7b7eSSam Leffler { 2024c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2025c42a7b7eSSam Leffler 2026c42a7b7eSSam Leffler ATH_LOCK(sc); 2027c42a7b7eSSam Leffler ath_stop_locked(ifp); 2028f0b2a0beSSam Leffler ATH_UNLOCK(sc); 20295591b213SSam Leffler } 20305591b213SSam Leffler 20315591b213SSam Leffler /* 20325591b213SSam Leffler * Reset the hardware w/o losing operational state. This is 20335591b213SSam Leffler * basically a more efficient way of doing ath_stop, ath_init, 20345591b213SSam Leffler * followed by state transitions to the current 802.11 2035c42a7b7eSSam Leffler * operational state. Used to recover from various errors and 2036c42a7b7eSSam Leffler * to reset or reload hardware state. 
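/*
 * ath_reset_grablock() always bumps sc_inreset_cnt, even when it gives up
 * waiting for an earlier reset; only its return value says whether the
 * caller owns the reset path exclusively, and the matching decrement is
 * currently open-coded at the end of ath_reset() (hence the "write
 * ath_reset_releaselock" TODO above).  A compact model of that pairing;
 * the release helper is a guess at what the TODO would look like, not code
 * taken from the driver.
 */
#include <stdio.h>

static int inreset_cnt;

/* returns 1 if we are the only reset in progress, 0 if we piggybacked */
static int
reset_grablock(int dowait, int max_tries)
{
	int exclusive = 0;

	for (int i = max_tries; i > 0; i--) {
		if (inreset_cnt == 0) {
			exclusive = 1;
			break;
		}
		if (!dowait)
			break;
		/* the driver drops the PCU lock and pause()s here */
	}
	inreset_cnt++;		/* counted even when not exclusive */
	if (!exclusive)
		printf("warning: recursive/concurrent reset path\n");
	return exclusive;
}

/* hypothetical helper the XXX TODO refers to; not present in the source */
static void
reset_releaselock(void)
{
	inreset_cnt--;		/* pairs with the increment in grablock */
}

int
main(void)
{
	int x = reset_grablock(1, 10);

	printf("exclusive=%d, inreset_cnt=%d\n", x, inreset_cnt);
	reset_releaselock();
	printf("after release, inreset_cnt=%d\n", inreset_cnt);
	return 0;
}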
20375591b213SSam Leffler */ 20386079fdbeSAdrian Chadd int 2039517526efSAdrian Chadd ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 20405591b213SSam Leffler { 2041c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2042b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 20435591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 20445591b213SSam Leffler HAL_STATUS status; 2045ef27340cSAdrian Chadd int i; 20465591b213SSam Leffler 2047f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 204816d4de92SAdrian Chadd 2049ee321975SAdrian Chadd /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2050ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 2051ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 2052ef27340cSAdrian Chadd 2053ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2054e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* disable interrupts */ 2055e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */ 2056ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 2057ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2058ef27340cSAdrian Chadd __func__); 2059ef27340cSAdrian Chadd } 2060ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2061ef27340cSAdrian Chadd 2062f52d3452SAdrian Chadd /* 20639a842e8bSAdrian Chadd * Should now wait for pending TX/RX to complete 20649a842e8bSAdrian Chadd * and block future ones from occuring. This needs to be 20659a842e8bSAdrian Chadd * done before the TX queue is drained. 2066f52d3452SAdrian Chadd */ 2067ef27340cSAdrian Chadd ath_draintxq(sc, reset_type); /* stop xmit side */ 2068ef27340cSAdrian Chadd 2069ef27340cSAdrian Chadd /* 2070ef27340cSAdrian Chadd * Regardless of whether we're doing a no-loss flush or 2071ef27340cSAdrian Chadd * not, stop the PCU and handle what's in the RX queue. 2072ef27340cSAdrian Chadd * That way frames aren't dropped which shouldn't be. 2073ef27340cSAdrian Chadd */ 20749a842e8bSAdrian Chadd ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2075ef27340cSAdrian Chadd ath_rx_proc(sc, 0); 2076ef27340cSAdrian Chadd 2077b032f27cSSam Leffler ath_settkipmic(sc); /* configure TKIP MIC handling */ 20785591b213SSam Leffler /* NB: indicate channel change so we do a full reset */ 207959efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 20805591b213SSam Leffler if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 20815591b213SSam Leffler __func__, status); 2082c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 208348237774SAdrian Chadd 208448237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 208548237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 208648237774SAdrian Chadd 208768e8e04eSSam Leffler if (ath_startrecv(sc) != 0) /* restart recv */ 208868e8e04eSSam Leffler if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2089c42a7b7eSSam Leffler /* 2090c42a7b7eSSam Leffler * We may be doing a reset in response to an ioctl 2091c42a7b7eSSam Leffler * that changes the channel so update any state that 2092c42a7b7eSSam Leffler * might change as a result. 
2093c42a7b7eSSam Leffler */ 2094724c193aSSam Leffler ath_chan_change(sc, ic->ic_curchan); 2095c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 2096584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 209710ad9a77SSam Leffler if (sc->sc_tdma) 209810ad9a77SSam Leffler ath_tdma_config(sc, NULL); 209910ad9a77SSam Leffler else 210010ad9a77SSam Leffler #endif 2101c89b957aSSam Leffler ath_beacon_config(sc, NULL); 210210ad9a77SSam Leffler } 2103c42a7b7eSSam Leffler 2104ef27340cSAdrian Chadd /* 2105ef27340cSAdrian Chadd * Release the reset lock and re-enable interrupts here. 2106ef27340cSAdrian Chadd * If an interrupt was being processed in ath_intr(), 2107ef27340cSAdrian Chadd * it would disable interrupts at this point. So we have 2108ef27340cSAdrian Chadd * to atomically enable interrupts and decrement the 2109ef27340cSAdrian Chadd * reset counter - this way ath_intr() doesn't end up 2110ef27340cSAdrian Chadd * disabling interrupts without a corresponding enable 2111ef27340cSAdrian Chadd * in the rest or channel change path. 2112ef27340cSAdrian Chadd */ 2113ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2114ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 2115ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 2116ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 2117ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2118ef27340cSAdrian Chadd 2119ef27340cSAdrian Chadd /* 2120ef27340cSAdrian Chadd * TX and RX can be started here. If it were started with 2121ef27340cSAdrian Chadd * sc_inreset_cnt > 0, the TX and RX path would abort. 2122ef27340cSAdrian Chadd * Thus if this is a nested call through the reset or 2123ef27340cSAdrian Chadd * channel change code, TX completion will occur but 2124ef27340cSAdrian Chadd * RX completion and ath_start / ath_tx_start will not 2125ef27340cSAdrian Chadd * run. 2126ef27340cSAdrian Chadd */ 2127ef27340cSAdrian Chadd 2128ef27340cSAdrian Chadd /* Restart TX/RX as needed */ 2129ef27340cSAdrian Chadd ath_txrx_start(sc); 2130ef27340cSAdrian Chadd 2131ef27340cSAdrian Chadd /* XXX Restart TX completion and pending TX */ 2132ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) { 2133ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2134ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 2135ef27340cSAdrian Chadd ATH_TXQ_LOCK(&sc->sc_txq[i]); 2136ef27340cSAdrian Chadd ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2137ef27340cSAdrian Chadd ath_txq_sched(sc, &sc->sc_txq[i]); 2138ef27340cSAdrian Chadd ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2139ef27340cSAdrian Chadd } 2140ef27340cSAdrian Chadd } 2141ef27340cSAdrian Chadd } 2142ef27340cSAdrian Chadd 2143ef27340cSAdrian Chadd /* 2144ef27340cSAdrian Chadd * This may have been set during an ath_start() call which 2145ef27340cSAdrian Chadd * set this once it detected a concurrent TX was going on. 2146ef27340cSAdrian Chadd * So, clear it. 2147ef27340cSAdrian Chadd */ 2148ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 2149ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2150ef27340cSAdrian Chadd 2151ef27340cSAdrian Chadd /* Handle any frames in the TX queue */ 2152ef27340cSAdrian Chadd /* 2153ef27340cSAdrian Chadd * XXX should this be done by the caller, rather than 2154ef27340cSAdrian Chadd * ath_reset() ? 
2155ef27340cSAdrian Chadd */ 2156c42a7b7eSSam Leffler ath_start(ifp); /* restart xmit */ 2157c42a7b7eSSam Leffler return 0; 21585591b213SSam Leffler } 21595591b213SSam Leffler 216068e8e04eSSam Leffler static int 2161b032f27cSSam Leffler ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2162b032f27cSSam Leffler { 21634b54a231SSam Leffler struct ieee80211com *ic = vap->iv_ic; 21644b54a231SSam Leffler struct ifnet *ifp = ic->ic_ifp; 21654b54a231SSam Leffler struct ath_softc *sc = ifp->if_softc; 21664b54a231SSam Leffler struct ath_hal *ah = sc->sc_ah; 21674b54a231SSam Leffler 21684b54a231SSam Leffler switch (cmd) { 21694b54a231SSam Leffler case IEEE80211_IOC_TXPOWER: 21704b54a231SSam Leffler /* 21714b54a231SSam Leffler * If per-packet TPC is enabled, then we have nothing 21724b54a231SSam Leffler * to do; otherwise we need to force the global limit. 21734b54a231SSam Leffler * All this can happen directly; no need to reset. 21744b54a231SSam Leffler */ 21754b54a231SSam Leffler if (!ath_hal_gettpc(ah)) 21764b54a231SSam Leffler ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 21774b54a231SSam Leffler return 0; 21784b54a231SSam Leffler } 2179517526efSAdrian Chadd /* XXX? Full or NOLOSS? */ 2180517526efSAdrian Chadd return ath_reset(ifp, ATH_RESET_FULL); 2181b032f27cSSam Leffler } 2182b032f27cSSam Leffler 2183b8e788a5SAdrian Chadd struct ath_buf * 218410ad9a77SSam Leffler _ath_getbuf_locked(struct ath_softc *sc) 218510ad9a77SSam Leffler { 218610ad9a77SSam Leffler struct ath_buf *bf; 218710ad9a77SSam Leffler 218810ad9a77SSam Leffler ATH_TXBUF_LOCK_ASSERT(sc); 218910ad9a77SSam Leffler 21906b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_txbuf); 2191e346b073SAdrian Chadd if (bf == NULL) { 2192e346b073SAdrian Chadd sc->sc_stats.ast_tx_getnobuf++; 2193e346b073SAdrian Chadd } else { 2194e346b073SAdrian Chadd if (bf->bf_flags & ATH_BUF_BUSY) { 2195e346b073SAdrian Chadd sc->sc_stats.ast_tx_getbusybuf++; 2196e346b073SAdrian Chadd bf = NULL; 2197e346b073SAdrian Chadd } 2198e346b073SAdrian Chadd } 2199e346b073SAdrian Chadd 220010ad9a77SSam Leffler if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) 22016b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 220210ad9a77SSam Leffler else 220310ad9a77SSam Leffler bf = NULL; 2204e346b073SAdrian Chadd 220510ad9a77SSam Leffler if (bf == NULL) { 220610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 22076b349e5aSAdrian Chadd TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 220810ad9a77SSam Leffler "out of xmit buffers" : "xmit buffer busy"); 2209e346b073SAdrian Chadd return NULL; 221010ad9a77SSam Leffler } 2211e346b073SAdrian Chadd 2212e346b073SAdrian Chadd /* Valid bf here; clear some basic fields */ 2213e346b073SAdrian Chadd bf->bf_next = NULL; /* XXX just to be sure */ 2214e346b073SAdrian Chadd bf->bf_last = NULL; /* XXX again, just to be sure */ 2215e346b073SAdrian Chadd bf->bf_comp = NULL; /* XXX again, just to be sure */ 2216e346b073SAdrian Chadd bzero(&bf->bf_state, sizeof(bf->bf_state)); 2217e346b073SAdrian Chadd 221810ad9a77SSam Leffler return bf; 221910ad9a77SSam Leffler } 222010ad9a77SSam Leffler 2221e346b073SAdrian Chadd /* 2222e346b073SAdrian Chadd * When retrying a software frame, buffers marked ATH_BUF_BUSY 2223e346b073SAdrian Chadd * can't be thrown back on the queue as they could still be 2224e346b073SAdrian Chadd * in use by the hardware. 2225e346b073SAdrian Chadd * 2226e346b073SAdrian Chadd * This duplicates the buffer, or returns NULL. 
2227e346b073SAdrian Chadd * 2228e346b073SAdrian Chadd * The descriptor is also copied but the link pointers and 2229e346b073SAdrian Chadd * the DMA segments aren't copied; this frame should thus 2230e346b073SAdrian Chadd * be again passed through the descriptor setup/chain routines 2231e346b073SAdrian Chadd * so the link is correct. 2232e346b073SAdrian Chadd * 2233e346b073SAdrian Chadd * The caller must free the buffer using ath_freebuf(). 2234e346b073SAdrian Chadd * 2235e346b073SAdrian Chadd * XXX TODO: this call shouldn't fail as it'll cause packet loss 2236e346b073SAdrian Chadd * XXX in the TX pathway when retries are needed. 2237e346b073SAdrian Chadd * XXX Figure out how to keep some buffers free, or factor the 2238e346b073SAdrian Chadd * XXX number of busy buffers into the xmit path (ath_start()) 2239e346b073SAdrian Chadd * XXX so we don't over-commit. 2240e346b073SAdrian Chadd */ 2241e346b073SAdrian Chadd struct ath_buf * 2242e346b073SAdrian Chadd ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2243e346b073SAdrian Chadd { 2244e346b073SAdrian Chadd struct ath_buf *tbf; 2245e346b073SAdrian Chadd 2246e346b073SAdrian Chadd tbf = ath_getbuf(sc); 2247e346b073SAdrian Chadd if (tbf == NULL) 2248e346b073SAdrian Chadd return NULL; /* XXX failure? Why? */ 2249e346b073SAdrian Chadd 2250e346b073SAdrian Chadd /* Copy basics */ 2251e346b073SAdrian Chadd tbf->bf_next = NULL; 2252e346b073SAdrian Chadd tbf->bf_nseg = bf->bf_nseg; 2253e346b073SAdrian Chadd tbf->bf_txflags = bf->bf_txflags; 2254e346b073SAdrian Chadd tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2255e346b073SAdrian Chadd tbf->bf_status = bf->bf_status; 2256e346b073SAdrian Chadd tbf->bf_m = bf->bf_m; 2257e346b073SAdrian Chadd tbf->bf_node = bf->bf_node; 2258e346b073SAdrian Chadd /* will be setup by the chain/setup function */ 2259e346b073SAdrian Chadd tbf->bf_lastds = NULL; 2260e346b073SAdrian Chadd /* for now, last == self */ 2261e346b073SAdrian Chadd tbf->bf_last = tbf; 2262e346b073SAdrian Chadd tbf->bf_comp = bf->bf_comp; 2263e346b073SAdrian Chadd 2264e346b073SAdrian Chadd /* NOTE: DMA segments will be setup by the setup/chain functions */ 2265e346b073SAdrian Chadd 2266e346b073SAdrian Chadd /* The caller has to re-init the descriptor + links */ 2267e346b073SAdrian Chadd 2268e346b073SAdrian Chadd /* Copy state */ 2269e346b073SAdrian Chadd memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2270e346b073SAdrian Chadd 2271e346b073SAdrian Chadd return tbf; 2272e346b073SAdrian Chadd } 2273e346b073SAdrian Chadd 2274b8e788a5SAdrian Chadd struct ath_buf * 227510ad9a77SSam Leffler ath_getbuf(struct ath_softc *sc) 227610ad9a77SSam Leffler { 227710ad9a77SSam Leffler struct ath_buf *bf; 227810ad9a77SSam Leffler 227910ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 228010ad9a77SSam Leffler bf = _ath_getbuf_locked(sc); 228110ad9a77SSam Leffler if (bf == NULL) { 228210ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 228310ad9a77SSam Leffler 228410ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 228510ad9a77SSam Leffler sc->sc_stats.ast_tx_qstop++; 2286ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? 
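/*
 * ath_buf_clone() exists because a buffer still marked ATH_BUF_BUSY may be
 * referenced by the hardware, so a software retry has to go out on a fresh
 * buffer that copies the frame's metadata but deliberately not the DMA
 * segments, link pointers or the busy flag.  The same copy-selectively idea
 * in miniature, on a toy structure rather than the real ath_buf:
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_BUSY	0x01	/* placeholder for ATH_BUF_BUSY */

struct toy_buf {
	struct toy_buf	*next;		/* chain pointer: never copied */
	void		*hw_desc;	/* rebuilt later by the chain code */
	int		 flags;
	int		 nseg;
	void		*payload;	/* shared with the original */
};

static struct toy_buf *
toy_buf_clone(const struct toy_buf *bf)
{
	struct toy_buf *tbf = calloc(1, sizeof(*tbf));

	if (tbf == NULL)
		return NULL;
	tbf->nseg = bf->nseg;
	tbf->payload = bf->payload;
	tbf->flags = bf->flags & ~BUF_BUSY;	/* the clone is not busy */
	/* next and hw_desc stay NULL: the descriptor chain is rebuilt */
	return tbf;
}

int
main(void)
{
	struct toy_buf orig = { .flags = BUF_BUSY, .nseg = 2, .payload = "frame" };
	struct toy_buf *copy = toy_buf_clone(&orig);

	if (copy != NULL)
		printf("clone flags 0x%x, nseg %d\n", copy->flags, copy->nseg);
	free(copy);
	return 0;
}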
*/ 228710ad9a77SSam Leffler ifp->if_drv_flags |= IFF_DRV_OACTIVE; 228810ad9a77SSam Leffler } 228910ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 229010ad9a77SSam Leffler return bf; 229110ad9a77SSam Leffler } 229210ad9a77SSam Leffler 22935591b213SSam Leffler static void 22945591b213SSam Leffler ath_start(struct ifnet *ifp) 22955591b213SSam Leffler { 22965591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 22975591b213SSam Leffler struct ieee80211_node *ni; 22985591b213SSam Leffler struct ath_buf *bf; 229968e8e04eSSam Leffler struct mbuf *m, *next; 230068e8e04eSSam Leffler ath_bufhead frags; 23015591b213SSam Leffler 230213f4c340SRobert Watson if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 23035591b213SSam Leffler return; 2304ef27340cSAdrian Chadd 2305ef27340cSAdrian Chadd /* XXX is it ok to hold the ATH_LOCK here? */ 2306ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2307ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 2308ef27340cSAdrian Chadd device_printf(sc->sc_dev, 2309ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2310ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 2311ef27340cSAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2312ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2313ef27340cSAdrian Chadd return; 2314ef27340cSAdrian Chadd } 2315ef27340cSAdrian Chadd sc->sc_txstart_cnt++; 2316ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2317ef27340cSAdrian Chadd 23185591b213SSam Leffler for (;;) { 23195591b213SSam Leffler /* 23205591b213SSam Leffler * Grab a TX buffer and associated resources. 23215591b213SSam Leffler */ 232210ad9a77SSam Leffler bf = ath_getbuf(sc); 232310ad9a77SSam Leffler if (bf == NULL) 23245591b213SSam Leffler break; 23252b9411e2SSam Leffler 2326b032f27cSSam Leffler IFQ_DEQUEUE(&ifp->if_snd, m); 2327b032f27cSSam Leffler if (m == NULL) { 2328b032f27cSSam Leffler ATH_TXBUF_LOCK(sc); 23296b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2330b032f27cSSam Leffler ATH_TXBUF_UNLOCK(sc); 2331b032f27cSSam Leffler break; 2332b032f27cSSam Leffler } 2333b032f27cSSam Leffler ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 233468e8e04eSSam Leffler /* 233568e8e04eSSam Leffler * Check for fragmentation. If this frame 233668e8e04eSSam Leffler * has been broken up verify we have enough 233768e8e04eSSam Leffler * buffers to send all the fragments so all 233868e8e04eSSam Leffler * go out or none... 233968e8e04eSSam Leffler */ 23406b349e5aSAdrian Chadd TAILQ_INIT(&frags); 234168e8e04eSSam Leffler if ((m->m_flags & M_FRAG) && 234268e8e04eSSam Leffler !ath_txfrag_setup(sc, &frags, m, ni)) { 234368e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 234468e8e04eSSam Leffler "%s: out of txfrag buffers\n", __func__); 234536c6be9aSSam Leffler sc->sc_stats.ast_tx_nofrag++; 23469cb93076SSam Leffler ifp->if_oerrors++; 234768e8e04eSSam Leffler ath_freetx(m); 234868e8e04eSSam Leffler goto bad; 234968e8e04eSSam Leffler } 2350339ccfb3SSam Leffler ifp->if_opackets++; 235168e8e04eSSam Leffler nextfrag: 235268e8e04eSSam Leffler /* 235368e8e04eSSam Leffler * Pass the frame to the h/w for transmission. 235468e8e04eSSam Leffler * Fragmented frames have each frag chained together 235568e8e04eSSam Leffler * with m_nextpkt. We know there are sufficient ath_buf's 235668e8e04eSSam Leffler * to send all the frags because of work done by 235768e8e04eSSam Leffler * ath_txfrag_setup. 
We leave m_nextpkt set while 235868e8e04eSSam Leffler * calling ath_tx_start so it can use it to extend the 235968e8e04eSSam Leffler * the tx duration to cover the subsequent frag and 236068e8e04eSSam Leffler * so it can reclaim all the mbufs in case of an error; 236168e8e04eSSam Leffler * ath_tx_start clears m_nextpkt once it commits to 236268e8e04eSSam Leffler * handing the frame to the hardware. 236368e8e04eSSam Leffler */ 236468e8e04eSSam Leffler next = m->m_nextpkt; 23655591b213SSam Leffler if (ath_tx_start(sc, ni, bf, m)) { 23665591b213SSam Leffler bad: 23675591b213SSam Leffler ifp->if_oerrors++; 2368c42a7b7eSSam Leffler reclaim: 236968e8e04eSSam Leffler bf->bf_m = NULL; 237068e8e04eSSam Leffler bf->bf_node = NULL; 2371c42a7b7eSSam Leffler ATH_TXBUF_LOCK(sc); 23726b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 237368e8e04eSSam Leffler ath_txfrag_cleanup(sc, &frags, ni); 2374c42a7b7eSSam Leffler ATH_TXBUF_UNLOCK(sc); 2375c42a7b7eSSam Leffler if (ni != NULL) 2376c42a7b7eSSam Leffler ieee80211_free_node(ni); 23775591b213SSam Leffler continue; 23785591b213SSam Leffler } 237968e8e04eSSam Leffler if (next != NULL) { 238068e8e04eSSam Leffler /* 238168e8e04eSSam Leffler * Beware of state changing between frags. 238268e8e04eSSam Leffler * XXX check sta power-save state? 238368e8e04eSSam Leffler */ 2384b032f27cSSam Leffler if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 238568e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 238668e8e04eSSam Leffler "%s: flush fragmented packet, state %s\n", 238768e8e04eSSam Leffler __func__, 2388b032f27cSSam Leffler ieee80211_state_name[ni->ni_vap->iv_state]); 238968e8e04eSSam Leffler ath_freetx(next); 239068e8e04eSSam Leffler goto reclaim; 239168e8e04eSSam Leffler } 239268e8e04eSSam Leffler m = next; 23936b349e5aSAdrian Chadd bf = TAILQ_FIRST(&frags); 239468e8e04eSSam Leffler KASSERT(bf != NULL, ("no buf for txfrag")); 23956b349e5aSAdrian Chadd TAILQ_REMOVE(&frags, bf, bf_list); 239668e8e04eSSam Leffler goto nextfrag; 239768e8e04eSSam Leffler } 23985591b213SSam Leffler 23992e986da5SSam Leffler sc->sc_wd_timer = 5; 24005591b213SSam Leffler } 2401ef27340cSAdrian Chadd 2402ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2403ef27340cSAdrian Chadd sc->sc_txstart_cnt--; 2404ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 24055591b213SSam Leffler } 24065591b213SSam Leffler 24075591b213SSam Leffler static int 24085591b213SSam Leffler ath_media_change(struct ifnet *ifp) 24095591b213SSam Leffler { 2410b032f27cSSam Leffler int error = ieee80211_media_change(ifp); 2411b032f27cSSam Leffler /* NB: only the fixed rate can change and that doesn't need a reset */ 2412b032f27cSSam Leffler return (error == ENETRESET ? 0 : error); 24135591b213SSam Leffler } 24145591b213SSam Leffler 2415c42a7b7eSSam Leffler /* 2416c42a7b7eSSam Leffler * Block/unblock tx+rx processing while a key change is done. 2417c42a7b7eSSam Leffler * We assume the caller serializes key management operations 2418c42a7b7eSSam Leffler * so we only need to worry about synchronization with other 2419c42a7b7eSSam Leffler * uses that originate in the driver. 
2420c42a7b7eSSam Leffler */ 2421c42a7b7eSSam Leffler static void 2422b032f27cSSam Leffler ath_key_update_begin(struct ieee80211vap *vap) 2423c42a7b7eSSam Leffler { 2424b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2425c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2426c42a7b7eSSam Leffler 2427c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2428b032f27cSSam Leffler taskqueue_block(sc->sc_tq); 2429c42a7b7eSSam Leffler IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2430c42a7b7eSSam Leffler } 2431c42a7b7eSSam Leffler 2432c42a7b7eSSam Leffler static void 2433b032f27cSSam Leffler ath_key_update_end(struct ieee80211vap *vap) 2434c42a7b7eSSam Leffler { 2435b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2436c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2437c42a7b7eSSam Leffler 2438c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2439c42a7b7eSSam Leffler IF_UNLOCK(&ifp->if_snd); 2440b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 2441c42a7b7eSSam Leffler } 24425591b213SSam Leffler 24434bc0e754SSam Leffler /* 24444bc0e754SSam Leffler * Calculate the receive filter according to the 24454bc0e754SSam Leffler * operating mode and state: 24464bc0e754SSam Leffler * 24474bc0e754SSam Leffler * o always accept unicast, broadcast, and multicast traffic 2448b032f27cSSam Leffler * o accept PHY error frames when hardware doesn't have MIB support 2449411373ebSSam Leffler * to count and we need them for ANI (sta mode only until recently) 2450b032f27cSSam Leffler * and we are not scanning (ANI is disabled) 2451411373ebSSam Leffler * NB: older hal's add rx filter bits out of sight and we need to 2452411373ebSSam Leffler * blindly preserve them 24534bc0e754SSam Leffler * o probe request frames are accepted only when operating in 245459aa14a9SRui Paulo * hostap, adhoc, mesh, or monitor modes 2455b032f27cSSam Leffler * o enable promiscuous mode 2456b032f27cSSam Leffler * - when in monitor mode 2457b032f27cSSam Leffler * - if interface marked PROMISC (assumes bridge setting is filtered) 24584bc0e754SSam Leffler * o accept beacons: 24594bc0e754SSam Leffler * - when operating in station mode for collecting rssi data when 24604bc0e754SSam Leffler * the station is otherwise quiet, or 2461b032f27cSSam Leffler * - when operating in adhoc mode so the 802.11 layer creates 2462b032f27cSSam Leffler * node table entries for peers, 24634bc0e754SSam Leffler * - when scanning 2464b032f27cSSam Leffler * - when doing s/w beacon miss (e.g. 
for ap+sta) 2465b032f27cSSam Leffler * - when operating in ap mode in 11g to detect overlapping bss that 2466b032f27cSSam Leffler * require protection 246759aa14a9SRui Paulo * - when operating in mesh mode to detect neighbors 24686f48c956SSam Leffler * o accept control frames: 24696f48c956SSam Leffler * - when in monitor mode 2470b032f27cSSam Leffler * XXX HT protection for 11n 24714bc0e754SSam Leffler */ 24724bc0e754SSam Leffler static u_int32_t 247368e8e04eSSam Leffler ath_calcrxfilter(struct ath_softc *sc) 24744bc0e754SSam Leffler { 2475fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2476b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 24774bc0e754SSam Leffler u_int32_t rfilt; 24784bc0e754SSam Leffler 2479b032f27cSSam Leffler rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 2480411373ebSSam Leffler if (!sc->sc_needmib && !sc->sc_scanning) 2481411373ebSSam Leffler rfilt |= HAL_RX_FILTER_PHYERR; 24824bc0e754SSam Leffler if (ic->ic_opmode != IEEE80211_M_STA) 24834bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROBEREQ; 24845463c4a4SSam Leffler /* XXX ic->ic_monvaps != 0? */ 2485b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) 24864bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROM; 24874bc0e754SSam Leffler if (ic->ic_opmode == IEEE80211_M_STA || 248847db982fSSam Leffler ic->ic_opmode == IEEE80211_M_IBSS || 2489b032f27cSSam Leffler sc->sc_swbmiss || sc->sc_scanning) 2490b032f27cSSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2491b032f27cSSam Leffler /* 2492b032f27cSSam Leffler * NB: We don't recalculate the rx filter when 2493b032f27cSSam Leffler * ic_protmode changes; otherwise we could do 2494b032f27cSSam Leffler * this only when ic_protmode != NONE. 2495b032f27cSSam Leffler */ 2496b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_HOSTAP && 2497b032f27cSSam Leffler IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) 24984bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2499f378d4c8SAdrian Chadd 2500f378d4c8SAdrian Chadd /* 25014aa18e9dSAdrian Chadd * Enable hardware PS-POLL RX only for hostap mode; 2502f378d4c8SAdrian Chadd * STA mode sends PS-POLL frames but never 25034aa18e9dSAdrian Chadd * receives them. 2504f378d4c8SAdrian Chadd */ 2505dce0bccaSAdrian Chadd if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 2506f378d4c8SAdrian Chadd 0, NULL) == HAL_OK && 2507f378d4c8SAdrian Chadd ic->ic_opmode == IEEE80211_M_HOSTAP) 2508f378d4c8SAdrian Chadd rfilt |= HAL_RX_FILTER_PSPOLL; 2509f378d4c8SAdrian Chadd 2510fe0dd789SSam Leffler if (sc->sc_nmeshvaps) { 251159aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BEACON; 251259aa14a9SRui Paulo if (sc->sc_hasbmatch) 251359aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BSSID; 251459aa14a9SRui Paulo else 251559aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_PROM; 251659aa14a9SRui Paulo } 25176f48c956SSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR) 25186f48c956SSam Leffler rfilt |= HAL_RX_FILTER_CONTROL; 2519f378d4c8SAdrian Chadd 2520f378d4c8SAdrian Chadd /* 2521f378d4c8SAdrian Chadd * Enable RX of compressed BAR frames only when doing 2522f378d4c8SAdrian Chadd * 802.11n. Required for A-MPDU. 2523f378d4c8SAdrian Chadd */ 2524a83df4d3SAdrian Chadd if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) 2525a83df4d3SAdrian Chadd rfilt |= HAL_RX_FILTER_COMPBAR; 2526f378d4c8SAdrian Chadd 2527fad901ebSAdrian Chadd /* 2528fad901ebSAdrian Chadd * Enable radar PHY errors if requested by the 2529fad901ebSAdrian Chadd * DFS module. 
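 *
 * As an illustrative end result (the exact bits depend on HAL
 * capabilities and current state): a hostap vap on a 5 GHz HT channel
 * with DFS enabled and no scan in progress would typically compute
 *
 *	UCAST | BCAST | MCAST | PROBEREQ | COMPBAR | PHYRADAR
 *
 * plus PHYERR when the chip lacks MIB counters and PSPOLL when the
 * HAL reports the PS-POLL capability.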
2530fad901ebSAdrian Chadd */ 2531fad901ebSAdrian Chadd if (sc->sc_dodfs) 2532fad901ebSAdrian Chadd rfilt |= HAL_RX_FILTER_PHYRADAR; 2533fad901ebSAdrian Chadd 2534b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", 2535b032f27cSSam Leffler __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); 25364bc0e754SSam Leffler return rfilt; 2537b032f27cSSam Leffler } 2538b032f27cSSam Leffler 2539b032f27cSSam Leffler static void 2540b032f27cSSam Leffler ath_update_promisc(struct ifnet *ifp) 2541b032f27cSSam Leffler { 2542b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2543b032f27cSSam Leffler u_int32_t rfilt; 2544b032f27cSSam Leffler 2545b032f27cSSam Leffler /* configure rx filter */ 2546b032f27cSSam Leffler rfilt = ath_calcrxfilter(sc); 2547b032f27cSSam Leffler ath_hal_setrxfilter(sc->sc_ah, rfilt); 2548b032f27cSSam Leffler 2549b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2550b032f27cSSam Leffler } 2551b032f27cSSam Leffler 2552b032f27cSSam Leffler static void 2553b032f27cSSam Leffler ath_update_mcast(struct ifnet *ifp) 2554b032f27cSSam Leffler { 2555b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2556b032f27cSSam Leffler u_int32_t mfilt[2]; 2557b032f27cSSam Leffler 2558b032f27cSSam Leffler /* calculate and install multicast filter */ 2559b032f27cSSam Leffler if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2560b032f27cSSam Leffler struct ifmultiaddr *ifma; 2561b032f27cSSam Leffler /* 2562b032f27cSSam Leffler * Merge multicast addresses to form the hardware filter. 2563b032f27cSSam Leffler */ 2564b032f27cSSam Leffler mfilt[0] = mfilt[1] = 0; 2565eb956cd0SRobert Watson if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2566b032f27cSSam Leffler TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2567b032f27cSSam Leffler caddr_t dl; 2568b032f27cSSam Leffler u_int32_t val; 2569b032f27cSSam Leffler u_int8_t pos; 2570b032f27cSSam Leffler 2571b032f27cSSam Leffler /* calculate XOR of eight 6bit values */ 2572b032f27cSSam Leffler dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2573b032f27cSSam Leffler val = LE_READ_4(dl + 0); 2574b032f27cSSam Leffler pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2575b032f27cSSam Leffler val = LE_READ_4(dl + 3); 2576b032f27cSSam Leffler pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2577b032f27cSSam Leffler pos &= 0x3f; 2578b032f27cSSam Leffler mfilt[pos / 32] |= (1 << (pos % 32)); 2579b032f27cSSam Leffler } 2580eb956cd0SRobert Watson if_maddr_runlock(ifp); 2581b032f27cSSam Leffler } else 2582b032f27cSSam Leffler mfilt[0] = mfilt[1] = ~0; 2583b032f27cSSam Leffler ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2584b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2585b032f27cSSam Leffler __func__, mfilt[0], mfilt[1]); 25864bc0e754SSam Leffler } 25874bc0e754SSam Leffler 25885591b213SSam Leffler static void 25895591b213SSam Leffler ath_mode_init(struct ath_softc *sc) 25905591b213SSam Leffler { 2591fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2592b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 2593b032f27cSSam Leffler u_int32_t rfilt; 25945591b213SSam Leffler 25954bc0e754SSam Leffler /* configure rx filter */ 259668e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 25974bc0e754SSam Leffler ath_hal_setrxfilter(ah, rfilt); 25984bc0e754SSam Leffler 25995591b213SSam Leffler /* configure operational mode */ 2600c42a7b7eSSam Leffler ath_hal_setopmode(ah); 2601c42a7b7eSSam Leffler 260229aca940SSam 
Leffler /* handle any link-level address change */ 260329aca940SSam Leffler ath_hal_setmac(ah, IF_LLADDR(ifp)); 26045591b213SSam Leffler 26055591b213SSam Leffler /* calculate and install multicast filter */ 2606b032f27cSSam Leffler ath_update_mcast(ifp); 26075591b213SSam Leffler } 26085591b213SSam Leffler 2609c42a7b7eSSam Leffler /* 2610c42a7b7eSSam Leffler * Set the slot time based on the current setting. 2611c42a7b7eSSam Leffler */ 2612c42a7b7eSSam Leffler static void 2613c42a7b7eSSam Leffler ath_setslottime(struct ath_softc *sc) 2614c42a7b7eSSam Leffler { 2615b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2616c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2617aaa70f2fSSam Leffler u_int usec; 2618c42a7b7eSSam Leffler 2619aaa70f2fSSam Leffler if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2620aaa70f2fSSam Leffler usec = 13; 2621aaa70f2fSSam Leffler else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2622aaa70f2fSSam Leffler usec = 21; 2623724c193aSSam Leffler else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2624724c193aSSam Leffler /* honor short/long slot time only in 11g */ 2625724c193aSSam Leffler /* XXX shouldn't honor on pure g or turbo g channel */ 2626724c193aSSam Leffler if (ic->ic_flags & IEEE80211_F_SHSLOT) 2627aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_9; 2628aaa70f2fSSam Leffler else 2629aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_20; 2630724c193aSSam Leffler } else 2631724c193aSSam Leffler usec = HAL_SLOT_TIME_9; 2632aaa70f2fSSam Leffler 2633aaa70f2fSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 2634aaa70f2fSSam Leffler "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2635aaa70f2fSSam Leffler __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2636aaa70f2fSSam Leffler ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2637aaa70f2fSSam Leffler 2638aaa70f2fSSam Leffler ath_hal_setslottime(ah, usec); 2639c42a7b7eSSam Leffler sc->sc_updateslot = OK; 2640c42a7b7eSSam Leffler } 2641c42a7b7eSSam Leffler 2642c42a7b7eSSam Leffler /* 2643c42a7b7eSSam Leffler * Callback from the 802.11 layer to update the 2644c42a7b7eSSam Leffler * slot time based on the current setting. 2645c42a7b7eSSam Leffler */ 2646c42a7b7eSSam Leffler static void 2647c42a7b7eSSam Leffler ath_updateslot(struct ifnet *ifp) 2648c42a7b7eSSam Leffler { 2649c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2650b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 2651c42a7b7eSSam Leffler 2652c42a7b7eSSam Leffler /* 2653c42a7b7eSSam Leffler * When not coordinating the BSS, change the hardware 2654c42a7b7eSSam Leffler * immediately. For other operation we defer the change 2655c42a7b7eSSam Leffler * until beacon updates have propagated to the stations. 2656c42a7b7eSSam Leffler */ 265759aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 265859aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) 2659c42a7b7eSSam Leffler sc->sc_updateslot = UPDATE; 2660c42a7b7eSSam Leffler else 2661c42a7b7eSSam Leffler ath_setslottime(sc); 2662c42a7b7eSSam Leffler } 2663c42a7b7eSSam Leffler 2664c42a7b7eSSam Leffler /* 266580d2765fSSam Leffler * Setup a h/w transmit queue for beacons. 
266680d2765fSSam Leffler */ 266780d2765fSSam Leffler static int 266880d2765fSSam Leffler ath_beaconq_setup(struct ath_hal *ah) 266980d2765fSSam Leffler { 267080d2765fSSam Leffler HAL_TXQ_INFO qi; 267180d2765fSSam Leffler 267280d2765fSSam Leffler memset(&qi, 0, sizeof(qi)); 267380d2765fSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 267480d2765fSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 267580d2765fSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 26760f2e86fbSSam Leffler /* NB: for dynamic turbo, don't enable any other interrupts */ 2677bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 267880d2765fSSam Leffler return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 267980d2765fSSam Leffler } 268080d2765fSSam Leffler 268180d2765fSSam Leffler /* 26820f2e86fbSSam Leffler * Setup the transmit queue parameters for the beacon queue. 26830f2e86fbSSam Leffler */ 26840f2e86fbSSam Leffler static int 26850f2e86fbSSam Leffler ath_beaconq_config(struct ath_softc *sc) 26860f2e86fbSSam Leffler { 26870f2e86fbSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) 2688b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 26890f2e86fbSSam Leffler struct ath_hal *ah = sc->sc_ah; 26900f2e86fbSSam Leffler HAL_TXQ_INFO qi; 26910f2e86fbSSam Leffler 26920f2e86fbSSam Leffler ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); 269359aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 269459aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 26950f2e86fbSSam Leffler /* 26960f2e86fbSSam Leffler * Always burst out beacon and CAB traffic. 26970f2e86fbSSam Leffler */ 26980f2e86fbSSam Leffler qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; 26990f2e86fbSSam Leffler qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; 27000f2e86fbSSam Leffler qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; 27010f2e86fbSSam Leffler } else { 27020f2e86fbSSam Leffler struct wmeParams *wmep = 27030f2e86fbSSam Leffler &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; 27040f2e86fbSSam Leffler /* 27050f2e86fbSSam Leffler * Adhoc mode; important thing is to use 2x cwmin. 27060f2e86fbSSam Leffler */ 27070f2e86fbSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 27080f2e86fbSSam Leffler qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 27090f2e86fbSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 27100f2e86fbSSam Leffler } 27110f2e86fbSSam Leffler 27120f2e86fbSSam Leffler if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 27130f2e86fbSSam Leffler device_printf(sc->sc_dev, "unable to update parameters for " 27140f2e86fbSSam Leffler "beacon hardware queue!\n"); 27150f2e86fbSSam Leffler return 0; 27160f2e86fbSSam Leffler } else { 27170f2e86fbSSam Leffler ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 27180f2e86fbSSam Leffler return 1; 27190f2e86fbSSam Leffler } 27200f2e86fbSSam Leffler #undef ATH_EXPONENT_TO_VALUE 27210f2e86fbSSam Leffler } 27220f2e86fbSSam Leffler 27230f2e86fbSSam Leffler /* 2724c42a7b7eSSam Leffler * Allocate and setup an initial beacon frame. 
2725c42a7b7eSSam Leffler */ 27265591b213SSam Leffler static int 27275591b213SSam Leffler ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 27285591b213SSam Leffler { 2729b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 2730b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 27315591b213SSam Leffler struct ath_buf *bf; 27325591b213SSam Leffler struct mbuf *m; 2733c42a7b7eSSam Leffler int error; 27345591b213SSam Leffler 2735b032f27cSSam Leffler bf = avp->av_bcbuf; 27367ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n", 27377ebd03d7SAdrian Chadd __func__, bf->bf_m, bf->bf_node); 2738b032f27cSSam Leffler if (bf->bf_m != NULL) { 2739b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2740b032f27cSSam Leffler m_freem(bf->bf_m); 2741b032f27cSSam Leffler bf->bf_m = NULL; 2742c42a7b7eSSam Leffler } 2743b032f27cSSam Leffler if (bf->bf_node != NULL) { 2744b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 2745b032f27cSSam Leffler bf->bf_node = NULL; 2746b032f27cSSam Leffler } 2747b032f27cSSam Leffler 27485591b213SSam Leffler /* 27495591b213SSam Leffler * NB: the beacon data buffer must be 32-bit aligned; 27505591b213SSam Leffler * we assume the mbuf routines will return us something 27515591b213SSam Leffler * with this alignment (perhaps should assert). 27525591b213SSam Leffler */ 2753b032f27cSSam Leffler m = ieee80211_beacon_alloc(ni, &avp->av_boff); 27545591b213SSam Leffler if (m == NULL) { 2755b032f27cSSam Leffler device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__); 27565591b213SSam Leffler sc->sc_stats.ast_be_nombuf++; 27575591b213SSam Leffler return ENOMEM; 27585591b213SSam Leffler } 2759f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 2760f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 27615591b213SSam Leffler BUS_DMA_NOWAIT); 2762b032f27cSSam Leffler if (error != 0) { 2763b032f27cSSam Leffler device_printf(sc->sc_dev, 2764b032f27cSSam Leffler "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n", 2765b032f27cSSam Leffler __func__, error); 2766b032f27cSSam Leffler m_freem(m); 2767b032f27cSSam Leffler return error; 2768b032f27cSSam Leffler } 2769b032f27cSSam Leffler 2770b032f27cSSam Leffler /* 2771b032f27cSSam Leffler * Calculate a TSF adjustment factor required for staggered 2772b032f27cSSam Leffler * beacons. Note that we assume the format of the beacon 2773b032f27cSSam Leffler * frame leaves the tstamp field immediately following the 2774b032f27cSSam Leffler * header. 2775b032f27cSSam Leffler */ 2776b032f27cSSam Leffler if (sc->sc_stagbeacons && avp->av_bslot > 0) { 2777b032f27cSSam Leffler uint64_t tsfadjust; 2778b032f27cSSam Leffler struct ieee80211_frame *wh; 2779b032f27cSSam Leffler 2780b032f27cSSam Leffler /* 2781b032f27cSSam Leffler * The beacon interval is in TU's; the TSF is in usecs. 2782b032f27cSSam Leffler * We figure out how many TU's to add to align the timestamp 2783b032f27cSSam Leffler * then convert to TSF units and handle byte swapping before 2784b032f27cSSam Leffler * inserting it in the frame. The hardware will then add this 2785b032f27cSSam Leffler * each time a beacon frame is sent. Note that we align vap's 2786b032f27cSSam Leffler * 1..N and leave vap 0 untouched. This means vap 0 has a 2787b032f27cSSam Leffler * timestamp in one beacon interval while the others get a 2788b032f27cSSam Leffler * timstamp aligned to the next interval. 
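 *
 * Illustrative numbers (an ATH_BCBUF of 4 is assumed purely for the
 * example): with a 100 TU beacon interval, the vap in bslot 1 gets
 * tsfadjust = 100 * (4 - 1) / 4 = 75 TU, i.e. 75 << 10 = 76800 usec
 * added by the hardware to the timestamp of each beacon it transmits.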
2789b032f27cSSam Leffler */ 2790b032f27cSSam Leffler tsfadjust = ni->ni_intval * 2791b032f27cSSam Leffler (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF; 2792b032f27cSSam Leffler tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */ 2793b032f27cSSam Leffler 2794b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2795b032f27cSSam Leffler "%s: %s beacons bslot %d intval %u tsfadjust %llu\n", 2796b032f27cSSam Leffler __func__, sc->sc_stagbeacons ? "stagger" : "burst", 27973627e321SSam Leffler avp->av_bslot, ni->ni_intval, 27983627e321SSam Leffler (long long unsigned) le64toh(tsfadjust)); 2799b032f27cSSam Leffler 2800b032f27cSSam Leffler wh = mtod(m, struct ieee80211_frame *); 2801b032f27cSSam Leffler memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); 2802b032f27cSSam Leffler } 2803c42a7b7eSSam Leffler bf->bf_m = m; 2804f818612bSSam Leffler bf->bf_node = ieee80211_ref_node(ni); 2805b032f27cSSam Leffler 2806b032f27cSSam Leffler return 0; 28075591b213SSam Leffler } 2808c42a7b7eSSam Leffler 2809c42a7b7eSSam Leffler /* 2810c42a7b7eSSam Leffler * Setup the beacon frame for transmit. 2811c42a7b7eSSam Leffler */ 2812c42a7b7eSSam Leffler static void 2813c42a7b7eSSam Leffler ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 2814c42a7b7eSSam Leffler { 2815c42a7b7eSSam Leffler #define USE_SHPREAMBLE(_ic) \ 2816c42a7b7eSSam Leffler (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 2817c42a7b7eSSam Leffler == IEEE80211_F_SHPREAMBLE) 2818c42a7b7eSSam Leffler struct ieee80211_node *ni = bf->bf_node; 2819c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 2820c42a7b7eSSam Leffler struct mbuf *m = bf->bf_m; 2821c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2822c42a7b7eSSam Leffler struct ath_desc *ds; 2823c42a7b7eSSam Leffler int flags, antenna; 282455f63772SSam Leffler const HAL_RATE_TABLE *rt; 282555f63772SSam Leffler u_int8_t rix, rate; 2826c42a7b7eSSam Leffler 28274a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", 2828c42a7b7eSSam Leffler __func__, m, m->m_len); 28295591b213SSam Leffler 28305591b213SSam Leffler /* setup descriptors */ 28315591b213SSam Leffler ds = bf->bf_desc; 28326edf1dc7SAdrian Chadd bf->bf_last = bf; 28336edf1dc7SAdrian Chadd bf->bf_lastds = ds; 28345591b213SSam Leffler 2835c42a7b7eSSam Leffler flags = HAL_TXDESC_NOACK; 2836c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 2837c42a7b7eSSam Leffler ds->ds_link = bf->bf_daddr; /* self-linked */ 2838c42a7b7eSSam Leffler flags |= HAL_TXDESC_VEOL; 2839c42a7b7eSSam Leffler /* 2840c42a7b7eSSam Leffler * Let hardware handle antenna switching. 2841c42a7b7eSSam Leffler */ 28424866e6c2SSam Leffler antenna = sc->sc_txantenna; 2843c42a7b7eSSam Leffler } else { 28445591b213SSam Leffler ds->ds_link = 0; 2845c42a7b7eSSam Leffler /* 2846c42a7b7eSSam Leffler * Switch antenna every 4 beacons. 2847c42a7b7eSSam Leffler * XXX assumes two antenna 2848c42a7b7eSSam Leffler */ 2849b032f27cSSam Leffler if (sc->sc_txantenna != 0) 2850b032f27cSSam Leffler antenna = sc->sc_txantenna; 2851b032f27cSSam Leffler else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0) 2852b032f27cSSam Leffler antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1); 2853b032f27cSSam Leffler else 2854b032f27cSSam Leffler antenna = (sc->sc_stats.ast_be_xmit & 4 ? 
2 : 1); 2855c42a7b7eSSam Leffler } 2856c42a7b7eSSam Leffler 2857c42a7b7eSSam Leffler KASSERT(bf->bf_nseg == 1, 2858c42a7b7eSSam Leffler ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 28595591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 28605591b213SSam Leffler /* 28615591b213SSam Leffler * Calculate rate code. 28625591b213SSam Leffler * XXX everything at min xmit rate 28635591b213SSam Leffler */ 2864b032f27cSSam Leffler rix = 0; 286555f63772SSam Leffler rt = sc->sc_currates; 286655f63772SSam Leffler rate = rt->info[rix].rateCode; 2867c42a7b7eSSam Leffler if (USE_SHPREAMBLE(ic)) 286855f63772SSam Leffler rate |= rt->info[rix].shortPreamble; 28695591b213SSam Leffler ath_hal_setuptxdesc(ah, ds 2870c42a7b7eSSam Leffler , m->m_len + IEEE80211_CRC_LEN /* frame length */ 28715591b213SSam Leffler , sizeof(struct ieee80211_frame)/* header length */ 28725591b213SSam Leffler , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 2873c42a7b7eSSam Leffler , ni->ni_txpower /* txpower XXX */ 28745591b213SSam Leffler , rate, 1 /* series 0 rate/tries */ 28755591b213SSam Leffler , HAL_TXKEYIX_INVALID /* no encryption */ 2876c42a7b7eSSam Leffler , antenna /* antenna mode */ 2877c42a7b7eSSam Leffler , flags /* no ack, veol for beacons */ 28785591b213SSam Leffler , 0 /* rts/cts rate */ 28795591b213SSam Leffler , 0 /* rts/cts duration */ 28805591b213SSam Leffler ); 28815591b213SSam Leffler /* NB: beacon's BufLen must be a multiple of 4 bytes */ 28825591b213SSam Leffler ath_hal_filltxdesc(ah, ds 2883c42a7b7eSSam Leffler , roundup(m->m_len, 4) /* buffer length */ 28845591b213SSam Leffler , AH_TRUE /* first segment */ 28855591b213SSam Leffler , AH_TRUE /* last segment */ 2886c42a7b7eSSam Leffler , ds /* first descriptor */ 28875591b213SSam Leffler ); 2888b032f27cSSam Leffler #if 0 2889b032f27cSSam Leffler ath_desc_swap(ds); 2890b032f27cSSam Leffler #endif 2891c42a7b7eSSam Leffler #undef USE_SHPREAMBLE 28925591b213SSam Leffler } 28935591b213SSam Leffler 2894b105a069SSam Leffler static void 2895b032f27cSSam Leffler ath_beacon_update(struct ieee80211vap *vap, int item) 2896b105a069SSam Leffler { 2897b032f27cSSam Leffler struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff; 2898b105a069SSam Leffler 2899b105a069SSam Leffler setbit(bo->bo_flags, item); 2900b105a069SSam Leffler } 2901b105a069SSam Leffler 2902c42a7b7eSSam Leffler /* 2903622b3fd2SSam Leffler * Append the contents of src to dst; both queues 2904622b3fd2SSam Leffler * are assumed to be locked. 2905622b3fd2SSam Leffler */ 2906622b3fd2SSam Leffler static void 2907622b3fd2SSam Leffler ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2908622b3fd2SSam Leffler { 29096b349e5aSAdrian Chadd TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2910622b3fd2SSam Leffler dst->axq_link = src->axq_link; 2911622b3fd2SSam Leffler src->axq_link = NULL; 2912622b3fd2SSam Leffler dst->axq_depth += src->axq_depth; 29136edf1dc7SAdrian Chadd dst->axq_aggr_depth += src->axq_aggr_depth; 2914622b3fd2SSam Leffler src->axq_depth = 0; 29156edf1dc7SAdrian Chadd src->axq_aggr_depth = 0; 2916622b3fd2SSam Leffler } 2917622b3fd2SSam Leffler 2918622b3fd2SSam Leffler /* 2919c42a7b7eSSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates to the 2920c42a7b7eSSam Leffler * frame contents are done as needed and the slot time is 2921c42a7b7eSSam Leffler * also adjusted based on current state. 
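 *
 * For staggered beacons the slot serviced on each SWBA follows the
 * TSF: slot = ((tsftu % ic_lintval) * ATH_BCBUF) / ic_lintval.
 * Illustrative numbers (ATH_BCBUF of 4 assumed for the example): with
 * ic_lintval of 100 TU and tsftu % 100 == 30, slot is 30 * 4 / 100 = 1
 * and the beacon for sc_bslot[(1 + 1) % 4] is generated, so each vap
 * beacons roughly 100 / 4 = 25 TU apart.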
2922c42a7b7eSSam Leffler */ 29235591b213SSam Leffler static void 29245591b213SSam Leffler ath_beacon_proc(void *arg, int pending) 29255591b213SSam Leffler { 29265591b213SSam Leffler struct ath_softc *sc = arg; 29275591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 2928b032f27cSSam Leffler struct ieee80211vap *vap; 2929b032f27cSSam Leffler struct ath_buf *bf; 2930b032f27cSSam Leffler int slot, otherant; 2931b032f27cSSam Leffler uint32_t bfaddr; 29325591b213SSam Leffler 2933c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2934c42a7b7eSSam Leffler __func__, pending); 2935c42a7b7eSSam Leffler /* 2936c42a7b7eSSam Leffler * Check if the previous beacon has gone out. If 2937c66c48cbSSam Leffler * not don't try to post another, skip this period 2938c66c48cbSSam Leffler * and wait for the next. Missed beacons indicate 2939c66c48cbSSam Leffler * a problem and should not occur. If we miss too 2940c66c48cbSSam Leffler * many consecutive beacons reset the device. 2941c42a7b7eSSam Leffler */ 2942c42a7b7eSSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2943c42a7b7eSSam Leffler sc->sc_bmisscount++; 29447ec4e6b8SAdrian Chadd sc->sc_stats.ast_be_missed++; 29454a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2946c42a7b7eSSam Leffler "%s: missed %u consecutive beacons\n", 2947c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2948a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold) 29490bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2950c42a7b7eSSam Leffler return; 2951c42a7b7eSSam Leffler } 2952c42a7b7eSSam Leffler if (sc->sc_bmisscount != 0) { 2953c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2954c42a7b7eSSam Leffler "%s: resume beacon xmit after %u misses\n", 2955c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2956c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 2957c42a7b7eSSam Leffler } 2958c42a7b7eSSam Leffler 2959b032f27cSSam Leffler if (sc->sc_stagbeacons) { /* staggered beacons */ 2960b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2961b032f27cSSam Leffler uint32_t tsftu; 2962b032f27cSSam Leffler 2963b032f27cSSam Leffler tsftu = ath_hal_gettsf32(ah) >> 10; 2964b032f27cSSam Leffler /* XXX lintval */ 2965b032f27cSSam Leffler slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; 2966b032f27cSSam Leffler vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; 2967b032f27cSSam Leffler bfaddr = 0; 2968309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2969b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 2970b032f27cSSam Leffler if (bf != NULL) 2971b032f27cSSam Leffler bfaddr = bf->bf_daddr; 2972b032f27cSSam Leffler } 2973b032f27cSSam Leffler } else { /* burst'd beacons */ 2974b032f27cSSam Leffler uint32_t *bflink = &bfaddr; 2975b032f27cSSam Leffler 2976b032f27cSSam Leffler for (slot = 0; slot < ATH_BCBUF; slot++) { 2977b032f27cSSam Leffler vap = sc->sc_bslot[slot]; 2978309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2979b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 2980b032f27cSSam Leffler if (bf != NULL) { 2981b032f27cSSam Leffler *bflink = bf->bf_daddr; 2982b032f27cSSam Leffler bflink = &bf->bf_desc->ds_link; 2983c42a7b7eSSam Leffler } 2984c42a7b7eSSam Leffler } 2985b032f27cSSam Leffler } 2986b032f27cSSam Leffler *bflink = 0; /* terminate list */ 2987622b3fd2SSam Leffler } 2988c42a7b7eSSam Leffler 2989c42a7b7eSSam Leffler /* 2990c42a7b7eSSam Leffler * Handle slot time change when a non-ERP station joins/leaves 2991c42a7b7eSSam 
Leffler * an 11g network. The 802.11 layer notifies us via callback, 2992c42a7b7eSSam Leffler * we mark updateslot, then wait one beacon before effecting 2993c42a7b7eSSam Leffler * the change. This gives associated stations at least one 2994c42a7b7eSSam Leffler * beacon interval to note the state change. 2995c42a7b7eSSam Leffler */ 2996c42a7b7eSSam Leffler /* XXX locking */ 2997b032f27cSSam Leffler if (sc->sc_updateslot == UPDATE) { 2998c42a7b7eSSam Leffler sc->sc_updateslot = COMMIT; /* commit next beacon */ 2999b032f27cSSam Leffler sc->sc_slotupdate = slot; 3000b032f27cSSam Leffler } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 3001c42a7b7eSSam Leffler ath_setslottime(sc); /* commit change to h/w */ 3002c42a7b7eSSam Leffler 3003c42a7b7eSSam Leffler /* 3004c42a7b7eSSam Leffler * Check recent per-antenna transmit statistics and flip 3005c42a7b7eSSam Leffler * the default antenna if noticeably more frames went out 3006c42a7b7eSSam Leffler * on the non-default antenna. 3007c42a7b7eSSam Leffler * XXX assumes 2 anntenae 3008c42a7b7eSSam Leffler */ 3009b032f27cSSam Leffler if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) { 3010c42a7b7eSSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1; 3011c42a7b7eSSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 3012c42a7b7eSSam Leffler ath_setdefantenna(sc, otherant); 3013c42a7b7eSSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 3014b032f27cSSam Leffler } 3015c42a7b7eSSam Leffler 3016b032f27cSSam Leffler if (bfaddr != 0) { 3017c42a7b7eSSam Leffler /* 3018c42a7b7eSSam Leffler * Stop any current dma and put the new frame on the queue. 3019c42a7b7eSSam Leffler * This should never fail since we check above that no frames 3020c42a7b7eSSam Leffler * are still pending on the queue. 3021c42a7b7eSSam Leffler */ 30225591b213SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 3023c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3024c42a7b7eSSam Leffler "%s: beacon queue %u did not stop?\n", 3025c42a7b7eSSam Leffler __func__, sc->sc_bhalq); 30265591b213SSam Leffler } 3027b032f27cSSam Leffler /* NB: cabq traffic should already be queued and primed */ 3028b032f27cSSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); 3029b032f27cSSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 3030b032f27cSSam Leffler 3031b032f27cSSam Leffler sc->sc_stats.ast_be_xmit++; 3032b032f27cSSam Leffler } 3033b032f27cSSam Leffler } 3034b032f27cSSam Leffler 3035b032f27cSSam Leffler static struct ath_buf * 3036b032f27cSSam Leffler ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) 3037b032f27cSSam Leffler { 3038b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3039b032f27cSSam Leffler struct ath_txq *cabq = sc->sc_cabq; 3040b032f27cSSam Leffler struct ath_buf *bf; 3041b032f27cSSam Leffler struct mbuf *m; 3042b032f27cSSam Leffler int nmcastq, error; 3043b032f27cSSam Leffler 3044309a3e45SSam Leffler KASSERT(vap->iv_state >= IEEE80211_S_RUN, 3045b032f27cSSam Leffler ("not running, state %d", vap->iv_state)); 3046b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3047b032f27cSSam Leffler 3048b032f27cSSam Leffler /* 3049b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3050b032f27cSSam Leffler * non-zero then we need to remap the memory because 3051b032f27cSSam Leffler * the beacon frame changed size (probably because 3052b032f27cSSam Leffler * of the TIM bitmap). 
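 *
 * The bo_tim[4] & 1 test further down assumes the standard TIM element
 * layout (element id, length, DTIM count, DTIM period, bitmap control):
 * bit 0 of the bitmap-control byte is the multicast indicator, i.e.
 * broadcast/multicast traffic is buffered and CAB frames must follow
 * this (DTIM) beacon.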
3053b032f27cSSam Leffler */ 3054b032f27cSSam Leffler bf = avp->av_bcbuf; 3055b032f27cSSam Leffler m = bf->bf_m; 3056b032f27cSSam Leffler nmcastq = avp->av_mcastq.axq_depth; 3057b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { 3058b032f27cSSam Leffler /* XXX too conservative? */ 3059b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3060b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3061b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3062b032f27cSSam Leffler BUS_DMA_NOWAIT); 3063b032f27cSSam Leffler if (error != 0) { 3064b032f27cSSam Leffler if_printf(vap->iv_ifp, 3065b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3066b032f27cSSam Leffler __func__, error); 3067b032f27cSSam Leffler return NULL; 3068b032f27cSSam Leffler } 3069b032f27cSSam Leffler } 3070b032f27cSSam Leffler if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { 3071b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 3072b032f27cSSam Leffler "%s: cabq did not drain, mcastq %u cabq %u\n", 3073b032f27cSSam Leffler __func__, nmcastq, cabq->axq_depth); 3074b032f27cSSam Leffler sc->sc_stats.ast_cabq_busy++; 3075b032f27cSSam Leffler if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { 3076b032f27cSSam Leffler /* 3077b032f27cSSam Leffler * CABQ traffic from a previous vap is still pending. 3078b032f27cSSam Leffler * We must drain the q before this beacon frame goes 3079b032f27cSSam Leffler * out as otherwise this vap's stations will get cab 3080b032f27cSSam Leffler * frames from a different vap. 3081b032f27cSSam Leffler * XXX could be slow causing us to miss DBA 3082b032f27cSSam Leffler */ 3083b032f27cSSam Leffler ath_tx_draintxq(sc, cabq); 3084b032f27cSSam Leffler } 3085b032f27cSSam Leffler } 3086b032f27cSSam Leffler ath_beacon_setup(sc, bf); 30875591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 30885591b213SSam Leffler 3089c42a7b7eSSam Leffler /* 3090c42a7b7eSSam Leffler * Enable the CAB queue before the beacon queue to 3091c42a7b7eSSam Leffler * insure cab frames are triggered by this beacon. 3092c42a7b7eSSam Leffler */ 3093b032f27cSSam Leffler if (avp->av_boff.bo_tim[4] & 1) { 3094b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3095b032f27cSSam Leffler 3096f3af83f7SSam Leffler /* NB: only at DTIM */ 3097622b3fd2SSam Leffler ATH_TXQ_LOCK(cabq); 3098b032f27cSSam Leffler ATH_TXQ_LOCK(&avp->av_mcastq); 3099622b3fd2SSam Leffler if (nmcastq) { 3100622b3fd2SSam Leffler struct ath_buf *bfm; 3101622b3fd2SSam Leffler 3102622b3fd2SSam Leffler /* 3103622b3fd2SSam Leffler * Move frames from the s/w mcast q to the h/w cab q. 3104b032f27cSSam Leffler * XXX MORE_DATA bit 3105622b3fd2SSam Leffler */ 31066b349e5aSAdrian Chadd bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q); 3107622b3fd2SSam Leffler if (cabq->axq_link != NULL) { 3108622b3fd2SSam Leffler *cabq->axq_link = bfm->bf_daddr; 3109622b3fd2SSam Leffler } else 3110622b3fd2SSam Leffler ath_hal_puttxbuf(ah, cabq->axq_qnum, 3111622b3fd2SSam Leffler bfm->bf_daddr); 3112b032f27cSSam Leffler ath_txqmove(cabq, &avp->av_mcastq); 3113622b3fd2SSam Leffler 3114622b3fd2SSam Leffler sc->sc_stats.ast_cabq_xmit += nmcastq; 3115622b3fd2SSam Leffler } 3116622b3fd2SSam Leffler /* NB: gated by beacon so safe to start here */ 31176b349e5aSAdrian Chadd if (! 
TAILQ_EMPTY(&(cabq->axq_q))) 3118622b3fd2SSam Leffler ath_hal_txstart(ah, cabq->axq_qnum); 3119b032f27cSSam Leffler ATH_TXQ_UNLOCK(&avp->av_mcastq); 31207b15790aSAdrian Chadd ATH_TXQ_UNLOCK(cabq); 3121622b3fd2SSam Leffler } 3122b032f27cSSam Leffler return bf; 3123b032f27cSSam Leffler } 3124b032f27cSSam Leffler 3125b032f27cSSam Leffler static void 3126b032f27cSSam Leffler ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) 3127b032f27cSSam Leffler { 3128b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3129b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3130b032f27cSSam Leffler struct ath_buf *bf; 3131b032f27cSSam Leffler struct mbuf *m; 3132b032f27cSSam Leffler int error; 3133b032f27cSSam Leffler 3134b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3135b032f27cSSam Leffler 3136b032f27cSSam Leffler /* 3137b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3138b032f27cSSam Leffler * non-zero then we need to remap the memory because 3139b032f27cSSam Leffler * the beacon frame changed size (probably because 3140b032f27cSSam Leffler * of the TIM bitmap). 3141b032f27cSSam Leffler */ 3142b032f27cSSam Leffler bf = avp->av_bcbuf; 3143b032f27cSSam Leffler m = bf->bf_m; 3144b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { 3145b032f27cSSam Leffler /* XXX too conservative? */ 3146b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3147b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3148b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3149b032f27cSSam Leffler BUS_DMA_NOWAIT); 3150b032f27cSSam Leffler if (error != 0) { 3151b032f27cSSam Leffler if_printf(vap->iv_ifp, 3152b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3153b032f27cSSam Leffler __func__, error); 3154b032f27cSSam Leffler return; 3155b032f27cSSam Leffler } 3156b032f27cSSam Leffler } 3157b032f27cSSam Leffler ath_beacon_setup(sc, bf); 3158b032f27cSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3159b032f27cSSam Leffler 3160b032f27cSSam Leffler /* NB: caller is known to have already stopped tx dma */ 31615591b213SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 31625591b213SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 31635591b213SSam Leffler } 31645591b213SSam Leffler 3165c42a7b7eSSam Leffler /* 3166c42a7b7eSSam Leffler * Reset the hardware after detecting beacons have stopped. 3167c42a7b7eSSam Leffler */ 3168c42a7b7eSSam Leffler static void 3169c42a7b7eSSam Leffler ath_bstuck_proc(void *arg, int pending) 3170c42a7b7eSSam Leffler { 3171c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3172fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 317316d4de92SAdrian Chadd uint32_t hangs = 0; 317416d4de92SAdrian Chadd 317516d4de92SAdrian Chadd if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 317616d4de92SAdrian Chadd if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 3177c42a7b7eSSam Leffler 3178c42a7b7eSSam Leffler if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 3179c42a7b7eSSam Leffler sc->sc_bmisscount); 3180c2e34459SSam Leffler sc->sc_stats.ast_bstuck++; 318116d4de92SAdrian Chadd /* 318216d4de92SAdrian Chadd * This assumes that there's no simultaneous channel mode change 318316d4de92SAdrian Chadd * occuring. 
318416d4de92SAdrian Chadd */ 3185517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 3186c42a7b7eSSam Leffler } 3187c42a7b7eSSam Leffler 3188c42a7b7eSSam Leffler /* 3189b032f27cSSam Leffler * Reclaim beacon resources and return buffer to the pool. 3190b032f27cSSam Leffler */ 3191b032f27cSSam Leffler static void 3192b032f27cSSam Leffler ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) 3193b032f27cSSam Leffler { 3194b032f27cSSam Leffler 31957ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 31967ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 3197b032f27cSSam Leffler if (bf->bf_m != NULL) { 3198b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3199b032f27cSSam Leffler m_freem(bf->bf_m); 3200b032f27cSSam Leffler bf->bf_m = NULL; 3201b032f27cSSam Leffler } 3202b032f27cSSam Leffler if (bf->bf_node != NULL) { 3203b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 3204b032f27cSSam Leffler bf->bf_node = NULL; 3205b032f27cSSam Leffler } 32066b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); 3207b032f27cSSam Leffler } 3208b032f27cSSam Leffler 3209b032f27cSSam Leffler /* 3210c42a7b7eSSam Leffler * Reclaim beacon resources. 3211c42a7b7eSSam Leffler */ 32125591b213SSam Leffler static void 32135591b213SSam Leffler ath_beacon_free(struct ath_softc *sc) 32145591b213SSam Leffler { 3215c42a7b7eSSam Leffler struct ath_buf *bf; 32165591b213SSam Leffler 32176b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 32187ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, 32197ebd03d7SAdrian Chadd "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 32207ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 32215591b213SSam Leffler if (bf->bf_m != NULL) { 32225591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 32235591b213SSam Leffler m_freem(bf->bf_m); 32245591b213SSam Leffler bf->bf_m = NULL; 3225f818612bSSam Leffler } 3226f818612bSSam Leffler if (bf->bf_node != NULL) { 3227f818612bSSam Leffler ieee80211_free_node(bf->bf_node); 32285591b213SSam Leffler bf->bf_node = NULL; 32295591b213SSam Leffler } 32305591b213SSam Leffler } 3231f818612bSSam Leffler } 32325591b213SSam Leffler 32335591b213SSam Leffler /* 32345591b213SSam Leffler * Configure the beacon and sleep timers. 32355591b213SSam Leffler * 32365591b213SSam Leffler * When operating as an AP this resets the TSF and sets 32375591b213SSam Leffler * up the hardware to notify us when we need to issue beacons. 32385591b213SSam Leffler * 32395591b213SSam Leffler * When operating in station mode this sets up the beacon 32405591b213SSam Leffler * timers according to the timestamp of the last received 32415591b213SSam Leffler * beacon and the current TSF, configures PCF and DTIM 32425591b213SSam Leffler * handling, programs the sleep registers so the hardware 32435591b213SSam Leffler * will wakeup in time to receive beacons, and configures 32445591b213SSam Leffler * the beacon miss handling so we'll receive a BMISS 32455591b213SSam Leffler * interrupt when we stop seeing beacons from the AP 32465591b213SSam Leffler * we've associated with. 
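 *
 * All of the timer math below is done in TU (1 TU = 1024 usec).
 * TSF_TO_TU() folds the 64-bit microsecond TSF into a 32-bit TU count
 * as (high << 22) | (low >> 10); e.g. a TSF of 174848 usec gives
 * 174848 >> 10 = 170 TU.  FUDGE (2 TU) is added to the current TSF so
 * the computed nexttbtt lands safely in the future.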
32475591b213SSam Leffler */ 32485591b213SSam Leffler static void 3249b032f27cSSam Leffler ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 32505591b213SSam Leffler { 325180d939bfSSam Leffler #define TSF_TO_TU(_h,_l) \ 325280d939bfSSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 325380d939bfSSam Leffler #define FUDGE 2 32545591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 3255b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3256b032f27cSSam Leffler struct ieee80211_node *ni; 325780d939bfSSam Leffler u_int32_t nexttbtt, intval, tsftu; 325880d939bfSSam Leffler u_int64_t tsf; 32595591b213SSam Leffler 3260b032f27cSSam Leffler if (vap == NULL) 3261b032f27cSSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 326280767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 3263b032f27cSSam Leffler 32648371372bSSam Leffler /* extract tstamp from last beacon and convert to TU */ 32658371372bSSam Leffler nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 32668371372bSSam Leffler LE_READ_4(ni->ni_tstamp.data)); 326759aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 326859aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3269b032f27cSSam Leffler /* 327059aa14a9SRui Paulo * For multi-bss ap/mesh support beacons are either staggered 3271b032f27cSSam Leffler * evenly over N slots or burst together. For the former 3272b032f27cSSam Leffler * arrange for the SWBA to be delivered for each slot. 3273b032f27cSSam Leffler * Slots that are not occupied will generate nothing. 3274b032f27cSSam Leffler */ 32758371372bSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 32764bacf7c1SSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3277b032f27cSSam Leffler if (sc->sc_stagbeacons) 3278b032f27cSSam Leffler intval /= ATH_BCBUF; 3279b032f27cSSam Leffler } else { 3280b032f27cSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 3281b032f27cSSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3282b032f27cSSam Leffler } 3283a6c992f4SSam Leffler if (nexttbtt == 0) /* e.g. for ap mode */ 3284a6c992f4SSam Leffler nexttbtt = intval; 3285a6c992f4SSam Leffler else if (intval) /* NB: can be 0 for monitor mode */ 3286a6c992f4SSam Leffler nexttbtt = roundup(nexttbtt, intval); 3287a6c992f4SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3288a6c992f4SSam Leffler __func__, nexttbtt, intval, ni->ni_intval); 3289b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 32905591b213SSam Leffler HAL_BEACON_STATE bs; 32918371372bSSam Leffler int dtimperiod, dtimcount; 32928371372bSSam Leffler int cfpperiod, cfpcount; 32935591b213SSam Leffler 32948371372bSSam Leffler /* 32958371372bSSam Leffler * Setup dtim and cfp parameters according to 32968371372bSSam Leffler * last beacon we received (which may be none). 32978371372bSSam Leffler */ 32988371372bSSam Leffler dtimperiod = ni->ni_dtim_period; 32998371372bSSam Leffler if (dtimperiod <= 0) /* NB: 0 if not known */ 33008371372bSSam Leffler dtimperiod = 1; 33018371372bSSam Leffler dtimcount = ni->ni_dtim_count; 33028371372bSSam Leffler if (dtimcount >= dtimperiod) /* NB: sanity check */ 33038371372bSSam Leffler dtimcount = 0; /* XXX? */ 33048371372bSSam Leffler cfpperiod = 1; /* NB: no PCF support yet */ 33058371372bSSam Leffler cfpcount = 0; 33068371372bSSam Leffler /* 33078371372bSSam Leffler * Pull nexttbtt forward to reflect the current 33088371372bSSam Leffler * TSF and calculate dtim+cfp state for the result. 
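 *
 * Illustrative numbers: with nexttbtt 1000 TU (from the last beacon),
 * intval 100 TU, dtimperiod 2, dtimcount 0 and tsftu 1250 TU, the loop
 * below steps 1100 -> 1200 -> 1300 and stops with nexttbtt = 1300 and
 * dtimcount = 1, so bs_nextdtim becomes 1300 + 1 * 100 = 1400 TU.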
33098371372bSSam Leffler */ 33108371372bSSam Leffler tsf = ath_hal_gettsf64(ah); 331180d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 33128371372bSSam Leffler do { 33138371372bSSam Leffler nexttbtt += intval; 33148371372bSSam Leffler if (--dtimcount < 0) { 33158371372bSSam Leffler dtimcount = dtimperiod - 1; 33168371372bSSam Leffler if (--cfpcount < 0) 33178371372bSSam Leffler cfpcount = cfpperiod - 1; 33188371372bSSam Leffler } 33198371372bSSam Leffler } while (nexttbtt < tsftu); 33205591b213SSam Leffler memset(&bs, 0, sizeof(bs)); 3321a6c992f4SSam Leffler bs.bs_intval = intval; 33225591b213SSam Leffler bs.bs_nexttbtt = nexttbtt; 33238371372bSSam Leffler bs.bs_dtimperiod = dtimperiod*intval; 33248371372bSSam Leffler bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; 33258371372bSSam Leffler bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 33268371372bSSam Leffler bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 33278371372bSSam Leffler bs.bs_cfpmaxduration = 0; 33288371372bSSam Leffler #if 0 33295591b213SSam Leffler /* 3330c42a7b7eSSam Leffler * The 802.11 layer records the offset to the DTIM 3331c42a7b7eSSam Leffler * bitmap while receiving beacons; use it here to 3332c42a7b7eSSam Leffler * enable h/w detection of our AID being marked in 3333c42a7b7eSSam Leffler * the bitmap vector (to indicate frames for us are 3334c42a7b7eSSam Leffler * pending at the AP). 33358371372bSSam Leffler * XXX do DTIM handling in s/w to WAR old h/w bugs 33368371372bSSam Leffler * XXX enable based on h/w rev for newer chips 3337c42a7b7eSSam Leffler */ 3338c42a7b7eSSam Leffler bs.bs_timoffset = ni->ni_timoff; 33398371372bSSam Leffler #endif 3340c42a7b7eSSam Leffler /* 33415591b213SSam Leffler * Calculate the number of consecutive beacons to miss 334268e8e04eSSam Leffler * before taking a BMISS interrupt. 33435591b213SSam Leffler * Note that we clamp the result to at most 10 beacons. 33445591b213SSam Leffler */ 3345b032f27cSSam Leffler bs.bs_bmissthreshold = vap->iv_bmissthreshold; 33465591b213SSam Leffler if (bs.bs_bmissthreshold > 10) 33475591b213SSam Leffler bs.bs_bmissthreshold = 10; 33485591b213SSam Leffler else if (bs.bs_bmissthreshold <= 0) 33495591b213SSam Leffler bs.bs_bmissthreshold = 1; 33505591b213SSam Leffler 33515591b213SSam Leffler /* 33525591b213SSam Leffler * Calculate sleep duration. The configuration is 33535591b213SSam Leffler * given in ms. We insure a multiple of the beacon 33545591b213SSam Leffler * period is used. Also, if the sleep duration is 33555591b213SSam Leffler * greater than the DTIM period then it makes senses 33565591b213SSam Leffler * to make it a multiple of that. 
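 *
 * Illustrative numbers: 100 ms is roughly 97 TU, so with a 100 TU
 * beacon interval bs_sleepduration rounds up to 100 TU; only if that
 * exceeded bs_dtimperiod would it be rounded up again to a DTIM
 * multiple.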
33575591b213SSam Leffler * 33585591b213SSam Leffler * XXX fixed at 100ms 33595591b213SSam Leffler */ 33604bacf7c1SSam Leffler bs.bs_sleepduration = 33614bacf7c1SSam Leffler roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); 33625591b213SSam Leffler if (bs.bs_sleepduration > bs.bs_dtimperiod) 33635591b213SSam Leffler bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 33645591b213SSam Leffler 3365c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 33668371372bSSam Leffler "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 33675591b213SSam Leffler , __func__ 33688371372bSSam Leffler , tsf, tsftu 33695591b213SSam Leffler , bs.bs_intval 33705591b213SSam Leffler , bs.bs_nexttbtt 33715591b213SSam Leffler , bs.bs_dtimperiod 33725591b213SSam Leffler , bs.bs_nextdtim 33735591b213SSam Leffler , bs.bs_bmissthreshold 33745591b213SSam Leffler , bs.bs_sleepduration 3375c42a7b7eSSam Leffler , bs.bs_cfpperiod 3376c42a7b7eSSam Leffler , bs.bs_cfpmaxduration 3377c42a7b7eSSam Leffler , bs.bs_cfpnext 3378c42a7b7eSSam Leffler , bs.bs_timoffset 3379c42a7b7eSSam Leffler ); 33805591b213SSam Leffler ath_hal_intrset(ah, 0); 3381c42a7b7eSSam Leffler ath_hal_beacontimers(ah, &bs); 33825591b213SSam Leffler sc->sc_imask |= HAL_INT_BMISS; 33835591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 33845591b213SSam Leffler } else { 33855591b213SSam Leffler ath_hal_intrset(ah, 0); 3386a6c992f4SSam Leffler if (nexttbtt == intval) 3387c42a7b7eSSam Leffler intval |= HAL_BEACON_RESET_TSF; 3388c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS) { 3389c42a7b7eSSam Leffler /* 3390c42a7b7eSSam Leffler * In IBSS mode enable the beacon timers but only 3391c42a7b7eSSam Leffler * enable SWBA interrupts if we need to manually 3392c42a7b7eSSam Leffler * prepare beacon frames. Otherwise we use a 3393c42a7b7eSSam Leffler * self-linked tx descriptor and let the hardware 3394c42a7b7eSSam Leffler * deal with things. 3395c42a7b7eSSam Leffler */ 3396c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 3397c42a7b7eSSam Leffler if (!sc->sc_hasveol) 3398c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_SWBA; 339980d939bfSSam Leffler if ((intval & HAL_BEACON_RESET_TSF) == 0) { 340080d939bfSSam Leffler /* 340180d939bfSSam Leffler * Pull nexttbtt forward to reflect 340280d939bfSSam Leffler * the current TSF. 340380d939bfSSam Leffler */ 340480d939bfSSam Leffler tsf = ath_hal_gettsf64(ah); 340580d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 340680d939bfSSam Leffler do { 340780d939bfSSam Leffler nexttbtt += intval; 340880d939bfSSam Leffler } while (nexttbtt < tsftu); 340980d939bfSSam Leffler } 34100f2e86fbSSam Leffler ath_beaconq_config(sc); 341159aa14a9SRui Paulo } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || 341259aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3413c42a7b7eSSam Leffler /* 341459aa14a9SRui Paulo * In AP/mesh mode we enable the beacon timers 341559aa14a9SRui Paulo * and SWBA interrupts to prepare beacon frames. 
3416c42a7b7eSSam Leffler */ 3417c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 34185591b213SSam Leffler sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 34190f2e86fbSSam Leffler ath_beaconq_config(sc); 3420c42a7b7eSSam Leffler } 3421c42a7b7eSSam Leffler ath_hal_beaconinit(ah, nexttbtt, intval); 3422c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 34235591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 3424c42a7b7eSSam Leffler /* 3425c42a7b7eSSam Leffler * When using a self-linked beacon descriptor in 3426c42a7b7eSSam Leffler * ibss mode load it once here. 3427c42a7b7eSSam Leffler */ 3428c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3429b032f27cSSam Leffler ath_beacon_start_adhoc(sc, vap); 34305591b213SSam Leffler } 343180d939bfSSam Leffler sc->sc_syncbeacon = 0; 343280767531SAdrian Chadd ieee80211_free_node(ni); 343380d939bfSSam Leffler #undef FUDGE 34348371372bSSam Leffler #undef TSF_TO_TU 34355591b213SSam Leffler } 34365591b213SSam Leffler 34375591b213SSam Leffler static void 34385591b213SSam Leffler ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 34395591b213SSam Leffler { 34405591b213SSam Leffler bus_addr_t *paddr = (bus_addr_t*) arg; 3441d77367bfSSam Leffler KASSERT(error == 0, ("error %u on bus_dma callback", error)); 34425591b213SSam Leffler *paddr = segs->ds_addr; 34435591b213SSam Leffler } 34445591b213SSam Leffler 34455591b213SSam Leffler static int 3446c42a7b7eSSam Leffler ath_descdma_setup(struct ath_softc *sc, 3447c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head, 3448c42a7b7eSSam Leffler const char *name, int nbuf, int ndesc) 3449c42a7b7eSSam Leffler { 3450c42a7b7eSSam Leffler #define DS2PHYS(_dd, _ds) \ 3451c42a7b7eSSam Leffler ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 345245abcd6cSAdrian Chadd #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 345345abcd6cSAdrian Chadd ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3454fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 345545abcd6cSAdrian Chadd uint8_t *ds; 3456c42a7b7eSSam Leffler struct ath_buf *bf; 3457c42a7b7eSSam Leffler int i, bsize, error; 345845abcd6cSAdrian Chadd int desc_len; 345945abcd6cSAdrian Chadd 346045abcd6cSAdrian Chadd desc_len = sizeof(struct ath_desc); 3461c42a7b7eSSam Leffler 3462c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3463c42a7b7eSSam Leffler __func__, name, nbuf, ndesc); 3464c42a7b7eSSam Leffler 3465c42a7b7eSSam Leffler dd->dd_name = name; 346645abcd6cSAdrian Chadd dd->dd_desc_len = desc_len * nbuf * ndesc; 346745abcd6cSAdrian Chadd 346845abcd6cSAdrian Chadd /* 346945abcd6cSAdrian Chadd * Merlin work-around: 347045abcd6cSAdrian Chadd * Descriptors that cross the 4KB boundary can't be used. 347145abcd6cSAdrian Chadd * Assume one skipped descriptor per 4KB page. 347245abcd6cSAdrian Chadd */ 347345abcd6cSAdrian Chadd if (! ath_hal_split4ktrans(sc->sc_ah)) { 347445abcd6cSAdrian Chadd int numdescpage = 4096 / (desc_len * ndesc); 347545abcd6cSAdrian Chadd dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 347645abcd6cSAdrian Chadd } 3477c42a7b7eSSam Leffler 3478c42a7b7eSSam Leffler /* 3479c42a7b7eSSam Leffler * Setup DMA descriptor area. 
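 *
 * To make the Merlin padding above concrete (the sizes are purely
 * illustrative): with a 32 byte descriptor and ndesc = 1, one 4KB
 * page holds 4096/32 = 128 descriptors, so nbuf = 512 buffers get
 * dd_desc_len = (512/128 + 1) * 4096, i.e. five pages instead of
 * the exact four, leaving slack for the descriptor skipped at
 * each page boundary.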
3480c42a7b7eSSam Leffler */ 3481c2175ff5SMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3482c42a7b7eSSam Leffler PAGE_SIZE, 0, /* alignment, bounds */ 3483c42a7b7eSSam Leffler BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3484c42a7b7eSSam Leffler BUS_SPACE_MAXADDR, /* highaddr */ 3485c42a7b7eSSam Leffler NULL, NULL, /* filter, filterarg */ 3486c42a7b7eSSam Leffler dd->dd_desc_len, /* maxsize */ 3487c42a7b7eSSam Leffler 1, /* nsegments */ 34886ccb8ea7SSam Leffler dd->dd_desc_len, /* maxsegsize */ 3489c42a7b7eSSam Leffler BUS_DMA_ALLOCNOW, /* flags */ 3490c42a7b7eSSam Leffler NULL, /* lockfunc */ 3491c42a7b7eSSam Leffler NULL, /* lockarg */ 3492c42a7b7eSSam Leffler &dd->dd_dmat); 3493c42a7b7eSSam Leffler if (error != 0) { 3494c42a7b7eSSam Leffler if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3495c42a7b7eSSam Leffler return error; 3496c42a7b7eSSam Leffler } 3497c42a7b7eSSam Leffler 3498c42a7b7eSSam Leffler /* allocate descriptors */ 3499c42a7b7eSSam Leffler error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 3500c42a7b7eSSam Leffler if (error != 0) { 3501c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s descriptors, " 3502c42a7b7eSSam Leffler "error %u\n", dd->dd_name, error); 3503c42a7b7eSSam Leffler goto fail0; 3504c42a7b7eSSam Leffler } 3505c42a7b7eSSam Leffler 3506c42a7b7eSSam Leffler error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 35070553a01fSSam Leffler BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 35080553a01fSSam Leffler &dd->dd_dmamap); 3509c42a7b7eSSam Leffler if (error != 0) { 3510c42a7b7eSSam Leffler if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3511c42a7b7eSSam Leffler "error %u\n", nbuf * ndesc, dd->dd_name, error); 3512c42a7b7eSSam Leffler goto fail1; 3513c42a7b7eSSam Leffler } 3514c42a7b7eSSam Leffler 3515c42a7b7eSSam Leffler error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3516c42a7b7eSSam Leffler dd->dd_desc, dd->dd_desc_len, 3517c42a7b7eSSam Leffler ath_load_cb, &dd->dd_desc_paddr, 3518c42a7b7eSSam Leffler BUS_DMA_NOWAIT); 3519c42a7b7eSSam Leffler if (error != 0) { 3520c42a7b7eSSam Leffler if_printf(ifp, "unable to map %s descriptors, error %u\n", 3521c42a7b7eSSam Leffler dd->dd_name, error); 3522c42a7b7eSSam Leffler goto fail2; 3523c42a7b7eSSam Leffler } 3524c42a7b7eSSam Leffler 352545abcd6cSAdrian Chadd ds = (uint8_t *) dd->dd_desc; 3526c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3527c42a7b7eSSam Leffler __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 3528c42a7b7eSSam Leffler (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 3529c42a7b7eSSam Leffler 3530ebecf802SSam Leffler /* allocate rx buffers */ 3531c42a7b7eSSam Leffler bsize = sizeof(struct ath_buf) * nbuf; 3532c42a7b7eSSam Leffler bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3533c42a7b7eSSam Leffler if (bf == NULL) { 3534c42a7b7eSSam Leffler if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3535c42a7b7eSSam Leffler dd->dd_name, bsize); 3536c42a7b7eSSam Leffler goto fail3; 3537c42a7b7eSSam Leffler } 3538c42a7b7eSSam Leffler dd->dd_bufptr = bf; 3539c42a7b7eSSam Leffler 35406b349e5aSAdrian Chadd TAILQ_INIT(head); 354145abcd6cSAdrian Chadd for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 354245abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 3543c42a7b7eSSam Leffler bf->bf_daddr = DS2PHYS(dd, ds); 354445abcd6cSAdrian Chadd if (! 
ath_hal_split4ktrans(sc->sc_ah)) { 354545abcd6cSAdrian Chadd /* 354645abcd6cSAdrian Chadd * Merlin WAR: Skip descriptor addresses which 354745abcd6cSAdrian Chadd * cause 4KB boundary crossing along any point 354845abcd6cSAdrian Chadd * in the descriptor. 354945abcd6cSAdrian Chadd */ 355045abcd6cSAdrian Chadd if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 355145abcd6cSAdrian Chadd desc_len * ndesc)) { 355245abcd6cSAdrian Chadd /* Start at the next page */ 355345abcd6cSAdrian Chadd ds += 0x1000 - (bf->bf_daddr & 0xFFF); 355445abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 355545abcd6cSAdrian Chadd bf->bf_daddr = DS2PHYS(dd, ds); 355645abcd6cSAdrian Chadd } 355745abcd6cSAdrian Chadd } 3558c42a7b7eSSam Leffler error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3559c42a7b7eSSam Leffler &bf->bf_dmamap); 3560c42a7b7eSSam Leffler if (error != 0) { 3561c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s " 3562c42a7b7eSSam Leffler "buffer %u, error %u\n", dd->dd_name, i, error); 3563c42a7b7eSSam Leffler ath_descdma_cleanup(sc, dd, head); 3564c42a7b7eSSam Leffler return error; 3565c42a7b7eSSam Leffler } 35666edf1dc7SAdrian Chadd bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 35676b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(head, bf, bf_list); 3568c42a7b7eSSam Leffler } 3569c42a7b7eSSam Leffler return 0; 3570c42a7b7eSSam Leffler fail3: 3571c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3572c42a7b7eSSam Leffler fail2: 3573c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3574c42a7b7eSSam Leffler fail1: 3575c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3576c42a7b7eSSam Leffler fail0: 3577c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3578c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3579c42a7b7eSSam Leffler return error; 3580c42a7b7eSSam Leffler #undef DS2PHYS 358145abcd6cSAdrian Chadd #undef ATH_DESC_4KB_BOUND_CHECK 3582c42a7b7eSSam Leffler } 3583c42a7b7eSSam Leffler 3584c42a7b7eSSam Leffler static void 3585c42a7b7eSSam Leffler ath_descdma_cleanup(struct ath_softc *sc, 3586c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head) 3587c42a7b7eSSam Leffler { 3588c42a7b7eSSam Leffler struct ath_buf *bf; 3589c42a7b7eSSam Leffler struct ieee80211_node *ni; 3590c42a7b7eSSam Leffler 3591c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3592c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3593c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3594c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3595c42a7b7eSSam Leffler 35966b349e5aSAdrian Chadd TAILQ_FOREACH(bf, head, bf_list) { 3597c42a7b7eSSam Leffler if (bf->bf_m) { 3598c42a7b7eSSam Leffler m_freem(bf->bf_m); 3599c42a7b7eSSam Leffler bf->bf_m = NULL; 3600c42a7b7eSSam Leffler } 3601c42a7b7eSSam Leffler if (bf->bf_dmamap != NULL) { 3602c42a7b7eSSam Leffler bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3603c42a7b7eSSam Leffler bf->bf_dmamap = NULL; 3604c42a7b7eSSam Leffler } 3605c42a7b7eSSam Leffler ni = bf->bf_node; 3606c42a7b7eSSam Leffler bf->bf_node = NULL; 3607c42a7b7eSSam Leffler if (ni != NULL) { 3608c42a7b7eSSam Leffler /* 3609c42a7b7eSSam Leffler * Reclaim node reference. 
3610c42a7b7eSSam Leffler */ 3611c42a7b7eSSam Leffler ieee80211_free_node(ni); 3612c42a7b7eSSam Leffler } 3613c42a7b7eSSam Leffler } 3614c42a7b7eSSam Leffler 36156b349e5aSAdrian Chadd TAILQ_INIT(head); 3616c42a7b7eSSam Leffler free(dd->dd_bufptr, M_ATHDEV); 3617c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3618c42a7b7eSSam Leffler } 3619c42a7b7eSSam Leffler 3620c42a7b7eSSam Leffler static int 36215591b213SSam Leffler ath_desc_alloc(struct ath_softc *sc) 36225591b213SSam Leffler { 3623c42a7b7eSSam Leffler int error; 36245591b213SSam Leffler 3625c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3626e2d787faSSam Leffler "rx", ath_rxbuf, 1); 36275591b213SSam Leffler if (error != 0) 36285591b213SSam Leffler return error; 36295591b213SSam Leffler 3630c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3631e2d787faSSam Leffler "tx", ath_txbuf, ATH_TXDESC); 3632c42a7b7eSSam Leffler if (error != 0) { 3633c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 36345591b213SSam Leffler return error; 3635c42a7b7eSSam Leffler } 3636c42a7b7eSSam Leffler 3637c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3638b032f27cSSam Leffler "beacon", ATH_BCBUF, 1); 3639c42a7b7eSSam Leffler if (error != 0) { 3640c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3641c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3642c42a7b7eSSam Leffler return error; 3643c42a7b7eSSam Leffler } 36445591b213SSam Leffler return 0; 36455591b213SSam Leffler } 36465591b213SSam Leffler 36475591b213SSam Leffler static void 36485591b213SSam Leffler ath_desc_free(struct ath_softc *sc) 36495591b213SSam Leffler { 36505591b213SSam Leffler 3651c42a7b7eSSam Leffler if (sc->sc_bdma.dd_desc_len != 0) 3652c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3653c42a7b7eSSam Leffler if (sc->sc_txdma.dd_desc_len != 0) 3654c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3655c42a7b7eSSam Leffler if (sc->sc_rxdma.dd_desc_len != 0) 3656c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 36575591b213SSam Leffler } 36585591b213SSam Leffler 36595591b213SSam Leffler static struct ieee80211_node * 366038c208f8SSam Leffler ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 36615591b213SSam Leffler { 366238c208f8SSam Leffler struct ieee80211com *ic = vap->iv_ic; 3663c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 3664c42a7b7eSSam Leffler const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3665c42a7b7eSSam Leffler struct ath_node *an; 3666c42a7b7eSSam Leffler 3667c42a7b7eSSam Leffler an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3668c42a7b7eSSam Leffler if (an == NULL) { 3669c42a7b7eSSam Leffler /* XXX stat+msg */ 3670de5af704SSam Leffler return NULL; 36715591b213SSam Leffler } 3672c42a7b7eSSam Leffler ath_rate_node_init(sc, an); 36735591b213SSam Leffler 36743dd85b26SAdrian Chadd /* Setup the mutex - there's no associd yet so set the name to NULL */ 36753dd85b26SAdrian Chadd snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 36763dd85b26SAdrian Chadd device_get_nameunit(sc->sc_dev), an); 36773dd85b26SAdrian Chadd mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 36783dd85b26SAdrian Chadd 3679eb6f0de0SAdrian Chadd /* XXX setup ath_tid */ 3680eb6f0de0SAdrian Chadd ath_tx_tid_init(sc, an); 3681eb6f0de0SAdrian Chadd 3682c42a7b7eSSam Leffler DPRINTF(sc, 
ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3683c42a7b7eSSam Leffler return &an->an_node; 3684c42a7b7eSSam Leffler } 3685c42a7b7eSSam Leffler 36865591b213SSam Leffler static void 36874afa805eSAdrian Chadd ath_node_cleanup(struct ieee80211_node *ni) 36884afa805eSAdrian Chadd { 36894afa805eSAdrian Chadd struct ieee80211com *ic = ni->ni_ic; 36904afa805eSAdrian Chadd struct ath_softc *sc = ic->ic_ifp->if_softc; 36914afa805eSAdrian Chadd 36924afa805eSAdrian Chadd /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3693eb6f0de0SAdrian Chadd ath_tx_node_flush(sc, ATH_NODE(ni)); 36944afa805eSAdrian Chadd ath_rate_node_cleanup(sc, ATH_NODE(ni)); 36954afa805eSAdrian Chadd sc->sc_node_cleanup(ni); 36964afa805eSAdrian Chadd } 36974afa805eSAdrian Chadd 36984afa805eSAdrian Chadd static void 3699c42a7b7eSSam Leffler ath_node_free(struct ieee80211_node *ni) 37005591b213SSam Leffler { 3701c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 3702c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 37031e774079SSam Leffler 3704c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 37053dd85b26SAdrian Chadd mtx_destroy(&ATH_NODE(ni)->an_mtx); 3706c42a7b7eSSam Leffler sc->sc_node_free(ni); 37075591b213SSam Leffler } 37085591b213SSam Leffler 370968e8e04eSSam Leffler static void 371068e8e04eSSam Leffler ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 371168e8e04eSSam Leffler { 371268e8e04eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 371368e8e04eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 371468e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 371568e8e04eSSam Leffler 3716b032f27cSSam Leffler *rssi = ic->ic_node_getrssi(ni); 371759efa8b5SSam Leffler if (ni->ni_chan != IEEE80211_CHAN_ANYC) 371859efa8b5SSam Leffler *noise = ath_hal_getchannoise(ah, ni->ni_chan); 371959efa8b5SSam Leffler else 372068e8e04eSSam Leffler *noise = -95; /* nominally correct */ 372168e8e04eSSam Leffler } 372268e8e04eSSam Leffler 37235591b213SSam Leffler static int 37245591b213SSam Leffler ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 37255591b213SSam Leffler { 37265591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 37275591b213SSam Leffler int error; 37285591b213SSam Leffler struct mbuf *m; 37295591b213SSam Leffler struct ath_desc *ds; 37305591b213SSam Leffler 37315591b213SSam Leffler m = bf->bf_m; 37325591b213SSam Leffler if (m == NULL) { 37335591b213SSam Leffler /* 37345591b213SSam Leffler * NB: by assigning a page to the rx dma buffer we 37355591b213SSam Leffler * implicitly satisfy the Atheros requirement that 37365591b213SSam Leffler * this buffer be cache-line-aligned and sized to be 37375591b213SSam Leffler * multiple of the cache line size. Not doing this 37385591b213SSam Leffler * causes weird stuff to happen (for the 5210 at least). 
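 *
 * (m_getcl() below hands back a standard mbuf cluster, normally
 * MCLBYTES = 2KB, which satisfies both the alignment and the
 * size-multiple requirement described here.)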
37395591b213SSam Leffler */ 37405591b213SSam Leffler m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 37415591b213SSam Leffler if (m == NULL) { 3742c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3743c42a7b7eSSam Leffler "%s: no mbuf/cluster\n", __func__); 37445591b213SSam Leffler sc->sc_stats.ast_rx_nombuf++; 37455591b213SSam Leffler return ENOMEM; 37465591b213SSam Leffler } 37475591b213SSam Leffler m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 37485591b213SSam Leffler 3749f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3750c42a7b7eSSam Leffler bf->bf_dmamap, m, 3751f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 37525591b213SSam Leffler BUS_DMA_NOWAIT); 37535591b213SSam Leffler if (error != 0) { 3754c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3755f9e6219bSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3756c42a7b7eSSam Leffler __func__, error); 37575591b213SSam Leffler sc->sc_stats.ast_rx_busdma++; 3758b2792ff6SSam Leffler m_freem(m); 37595591b213SSam Leffler return error; 37605591b213SSam Leffler } 3761d77367bfSSam Leffler KASSERT(bf->bf_nseg == 1, 3762d77367bfSSam Leffler ("multi-segment packet; nseg %u", bf->bf_nseg)); 3763b2792ff6SSam Leffler bf->bf_m = m; 37645591b213SSam Leffler } 37655591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 37665591b213SSam Leffler 376704e22a02SSam Leffler /* 376804e22a02SSam Leffler * Setup descriptors. For receive we always terminate 376904e22a02SSam Leffler * the descriptor list with a self-linked entry so we'll 377004e22a02SSam Leffler * not get overrun under high load (as can happen with a 3771c42a7b7eSSam Leffler * 5212 when ANI processing enables PHY error frames). 377204e22a02SSam Leffler * 377304e22a02SSam Leffler * To insure the last descriptor is self-linked we create 377404e22a02SSam Leffler * each descriptor as self-linked and add it to the end. As 377504e22a02SSam Leffler * each additional descriptor is added the previous self-linked 377604e22a02SSam Leffler * entry is ``fixed'' naturally. This should be safe even 377704e22a02SSam Leffler * if DMA is happening. When processing RX interrupts we 377804e22a02SSam Leffler * never remove/process the last, self-linked, entry on the 377904e22a02SSam Leffler * descriptor list. This insures the hardware always has 378004e22a02SSam Leffler * someplace to write a new frame. 378104e22a02SSam Leffler */ 37828a2a6beeSAdrian Chadd /* 37838a2a6beeSAdrian Chadd * 11N: we can no longer afford to self link the last descriptor. 37848a2a6beeSAdrian Chadd * MAC acknowledges BA status as long as it copies frames to host 37858a2a6beeSAdrian Chadd * buffer (or rx fifo). This can incorrectly acknowledge packets 37868a2a6beeSAdrian Chadd * to a sender if last desc is self-linked. 
37878a2a6beeSAdrian Chadd */ 37885591b213SSam Leffler ds = bf->bf_desc; 37898a2a6beeSAdrian Chadd if (sc->sc_rxslink) 379004e22a02SSam Leffler ds->ds_link = bf->bf_daddr; /* link to self */ 37918a2a6beeSAdrian Chadd else 37928a2a6beeSAdrian Chadd ds->ds_link = 0; /* terminate the list */ 37935591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 37945591b213SSam Leffler ath_hal_setuprxdesc(ah, ds 37955591b213SSam Leffler , m->m_len /* buffer size */ 37965591b213SSam Leffler , 0 37975591b213SSam Leffler ); 37985591b213SSam Leffler 37995591b213SSam Leffler if (sc->sc_rxlink != NULL) 38005591b213SSam Leffler *sc->sc_rxlink = bf->bf_daddr; 38015591b213SSam Leffler sc->sc_rxlink = &ds->ds_link; 38025591b213SSam Leffler return 0; 38035591b213SSam Leffler } 38045591b213SSam Leffler 3805c42a7b7eSSam Leffler /* 380603ed599aSSam Leffler * Extend 15-bit time stamp from rx descriptor to 38077b0c77ecSSam Leffler * a full 64-bit TSF using the specified TSF. 380803ed599aSSam Leffler */ 380903ed599aSSam Leffler static __inline u_int64_t 3810fc4de9b7SAdrian Chadd ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf) 381103ed599aSSam Leffler { 381203ed599aSSam Leffler if ((tsf & 0x7fff) < rstamp) 381303ed599aSSam Leffler tsf -= 0x8000; 3814fc4de9b7SAdrian Chadd 381503ed599aSSam Leffler return ((tsf &~ 0x7fff) | rstamp); 381603ed599aSSam Leffler } 381703ed599aSSam Leffler 381803ed599aSSam Leffler /* 3819fc4de9b7SAdrian Chadd * Extend 32-bit time stamp from rx descriptor to 3820fc4de9b7SAdrian Chadd * a full 64-bit TSF using the specified TSF. 3821fc4de9b7SAdrian Chadd */ 3822fc4de9b7SAdrian Chadd static __inline u_int64_t 3823fc4de9b7SAdrian Chadd ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf) 3824fc4de9b7SAdrian Chadd { 3825fc4de9b7SAdrian Chadd u_int32_t tsf_low = tsf & 0xffffffff; 3826fc4de9b7SAdrian Chadd u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp; 3827fc4de9b7SAdrian Chadd 3828fc4de9b7SAdrian Chadd if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000)) 3829fc4de9b7SAdrian Chadd tsf64 -= 0x100000000ULL; 3830fc4de9b7SAdrian Chadd 3831fc4de9b7SAdrian Chadd if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000)) 3832fc4de9b7SAdrian Chadd tsf64 += 0x100000000ULL; 3833fc4de9b7SAdrian Chadd 3834fc4de9b7SAdrian Chadd return tsf64; 3835fc4de9b7SAdrian Chadd } 3836fc4de9b7SAdrian Chadd 3837fc4de9b7SAdrian Chadd /* 3838fc4de9b7SAdrian Chadd * Extend the TSF from the RX descriptor to a full 64 bit TSF. 3839fc4de9b7SAdrian Chadd * Earlier hardware versions only wrote the low 15 bits of the 3840fc4de9b7SAdrian Chadd * TSF into the RX descriptor; later versions (AR5416 and up) 3841fc4de9b7SAdrian Chadd * include the 32 bit TSF value. 3842fc4de9b7SAdrian Chadd */ 3843fc4de9b7SAdrian Chadd static __inline u_int64_t 3844fc4de9b7SAdrian Chadd ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf) 3845fc4de9b7SAdrian Chadd { 3846fc4de9b7SAdrian Chadd if (sc->sc_rxtsf32) 3847fc4de9b7SAdrian Chadd return ath_extend_tsf32(rstamp, tsf); 3848fc4de9b7SAdrian Chadd else 3849fc4de9b7SAdrian Chadd return ath_extend_tsf15(rstamp, tsf); 3850fc4de9b7SAdrian Chadd } 3851fc4de9b7SAdrian Chadd 3852fc4de9b7SAdrian Chadd /* 3853c42a7b7eSSam Leffler * Intercept management frames to collect beacon rssi data 3854c42a7b7eSSam Leffler * and to do ibss merges. 
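 *
 * For the ibss merge path the low-order rx timestamp is widened
 * to a full 64-bit local TSF with ath_extend_tsf() and compared
 * against the 64-bit timestamp carried in the peer's beacon; the
 * merge is only attempted when the peer's TSF is at least as
 * large as ours (i.e. the peer network is older).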
3855c42a7b7eSSam Leffler */ 3856c42a7b7eSSam Leffler static void 3857b032f27cSSam Leffler ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 38585463c4a4SSam Leffler int subtype, int rssi, int nf) 3859c42a7b7eSSam Leffler { 3860b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 3861b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 3862c42a7b7eSSam Leffler 3863c42a7b7eSSam Leffler /* 3864c42a7b7eSSam Leffler * Call up first so subsequent work can use information 3865c42a7b7eSSam Leffler * potentially stored in the node (e.g. for ibss merge). 3866c42a7b7eSSam Leffler */ 38675463c4a4SSam Leffler ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf); 3868c42a7b7eSSam Leffler switch (subtype) { 3869c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_BEACON: 3870c42a7b7eSSam Leffler /* update rssi statistics for use by the hal */ 387180767531SAdrian Chadd /* XXX unlocked check against vap->iv_bss? */ 3872ffa2cab6SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); 387380d939bfSSam Leffler if (sc->sc_syncbeacon && 3874b032f27cSSam Leffler ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { 387580d939bfSSam Leffler /* 387680d939bfSSam Leffler * Resync beacon timers using the tsf of the beacon 387780d939bfSSam Leffler * frame we just received. 387880d939bfSSam Leffler */ 3879b032f27cSSam Leffler ath_beacon_config(sc, vap); 388080d939bfSSam Leffler } 3881c42a7b7eSSam Leffler /* fall thru... */ 3882c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3883b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS && 3884b032f27cSSam Leffler vap->iv_state == IEEE80211_S_RUN) { 38857041d50cSBernhard Schmidt uint32_t rstamp = sc->sc_lastrs->rs_tstamp; 3886fc4de9b7SAdrian Chadd uint64_t tsf = ath_extend_tsf(sc, rstamp, 38877b0c77ecSSam Leffler ath_hal_gettsf64(sc->sc_ah)); 3888c42a7b7eSSam Leffler /* 3889c42a7b7eSSam Leffler * Handle ibss merge as needed; check the tsf on the 3890c42a7b7eSSam Leffler * frame before attempting the merge. The 802.11 spec 3891c42a7b7eSSam Leffler * says the station should change it's bssid to match 3892c42a7b7eSSam Leffler * the oldest station with the same ssid, where oldest 3893f818612bSSam Leffler * is determined by the tsf. Note that hardware 3894f818612bSSam Leffler * reconfiguration happens through callback to 389503ed599aSSam Leffler * ath_newstate as the state machine will go from 389603ed599aSSam Leffler * RUN -> RUN when this happens. 3897c42a7b7eSSam Leffler */ 389803ed599aSSam Leffler if (le64toh(ni->ni_tstamp.tsf) >= tsf) { 389903ed599aSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 390033d7d80cSTai-hwa Liang "ibss merge, rstamp %u tsf %ju " 390133d7d80cSTai-hwa Liang "tstamp %ju\n", rstamp, (uintmax_t)tsf, 390233d7d80cSTai-hwa Liang (uintmax_t)ni->ni_tstamp.tsf); 3903641b4d0bSSam Leffler (void) ieee80211_ibss_merge(ni); 3904c42a7b7eSSam Leffler } 390503ed599aSSam Leffler } 3906c42a7b7eSSam Leffler break; 3907c42a7b7eSSam Leffler } 3908c42a7b7eSSam Leffler } 3909c42a7b7eSSam Leffler 3910c42a7b7eSSam Leffler /* 3911c42a7b7eSSam Leffler * Set the default antenna. 
3912c42a7b7eSSam Leffler */ 3913c42a7b7eSSam Leffler static void 3914c42a7b7eSSam Leffler ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3915c42a7b7eSSam Leffler { 3916c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3917c42a7b7eSSam Leffler 3918c42a7b7eSSam Leffler /* XXX block beacon interrupts */ 3919c42a7b7eSSam Leffler ath_hal_setdefantenna(ah, antenna); 3920c42a7b7eSSam Leffler if (sc->sc_defant != antenna) 3921c42a7b7eSSam Leffler sc->sc_stats.ast_ant_defswitch++; 3922c42a7b7eSSam Leffler sc->sc_defant = antenna; 3923c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 3924c42a7b7eSSam Leffler } 3925c42a7b7eSSam Leffler 39265463c4a4SSam Leffler static void 3927b032f27cSSam Leffler ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 392865f9edeeSSam Leffler const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 39297b0c77ecSSam Leffler { 3930e387d629SSam Leffler #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3931e387d629SSam Leffler #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3932e387d629SSam Leffler #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 393346d4d74cSSam Leffler #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3934b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 393546d4d74cSSam Leffler const HAL_RATE_TABLE *rt; 393646d4d74cSSam Leffler uint8_t rix; 39377b0c77ecSSam Leffler 393846d4d74cSSam Leffler rt = sc->sc_currates; 393946d4d74cSSam Leffler KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 394046d4d74cSSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate]; 394168e8e04eSSam Leffler sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 39427b0c77ecSSam Leffler sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 394346d4d74cSSam Leffler #ifdef AH_SUPPORT_AR5416 3944e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 394546d4d74cSSam Leffler if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 394659efa8b5SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 394759efa8b5SSam Leffler 3948e387d629SSam Leffler if ((rs->rs_flags & HAL_RX_2040) == 0) 3949e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 395059efa8b5SSam Leffler else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 3951e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3952e387d629SSam Leffler else 3953e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 395468e8e04eSSam Leffler if ((rs->rs_flags & HAL_RX_GI) == 0) 3955e387d629SSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 395668e8e04eSSam Leffler } 395768e8e04eSSam Leffler #endif 3958fc4de9b7SAdrian Chadd sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 395965f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 39607b0c77ecSSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 39617b0c77ecSSam Leffler /* XXX propagate other error flags from descriptor */ 39627b0c77ecSSam Leffler sc->sc_rx_th.wr_antnoise = nf; 39635463c4a4SSam Leffler sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 396465f9edeeSSam Leffler sc->sc_rx_th.wr_antenna = rs->rs_antenna; 396546d4d74cSSam Leffler #undef CHAN_HT 3966e387d629SSam Leffler #undef CHAN_HT20 3967e387d629SSam Leffler #undef CHAN_HT40U 3968e387d629SSam Leffler #undef CHAN_HT40D 39697b0c77ecSSam Leffler } 39707b0c77ecSSam Leffler 39715591b213SSam Leffler static void 3972b032f27cSSam Leffler ath_handle_micerror(struct ieee80211com *ic, 3973b032f27cSSam Leffler struct ieee80211_frame *wh, int keyix) 3974b032f27cSSam Leffler { 3975b032f27cSSam Leffler struct ieee80211_node *ni; 3976b032f27cSSam Leffler 
3977b032f27cSSam Leffler /* XXX recheck MIC to deal w/ chips that lie */ 3978b032f27cSSam Leffler /* XXX discard MIC errors on !data frames */ 3979b032f27cSSam Leffler ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 3980b032f27cSSam Leffler if (ni != NULL) { 3981b032f27cSSam Leffler ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 3982b032f27cSSam Leffler ieee80211_free_node(ni); 3983b032f27cSSam Leffler } 3984b032f27cSSam Leffler } 3985b032f27cSSam Leffler 398696ff485dSAdrian Chadd /* 398796ff485dSAdrian Chadd * Only run the RX proc if it's not already running. 398896ff485dSAdrian Chadd * Since this may get run as part of the reset/flush path, 398996ff485dSAdrian Chadd * the task can't clash with an existing, running tasklet. 399096ff485dSAdrian Chadd */ 3991b032f27cSSam Leffler static void 399296ff485dSAdrian Chadd ath_rx_tasklet(void *arg, int npending) 399396ff485dSAdrian Chadd { 399496ff485dSAdrian Chadd struct ath_softc *sc = arg; 399596ff485dSAdrian Chadd 399696ff485dSAdrian Chadd CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 399796ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 3998ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3999ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 4000ef27340cSAdrian Chadd device_printf(sc->sc_dev, 4001ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4002ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4003ef27340cSAdrian Chadd return; 4004ef27340cSAdrian Chadd } 4005ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 400696ff485dSAdrian Chadd ath_rx_proc(sc, 1); 400796ff485dSAdrian Chadd } 400896ff485dSAdrian Chadd 400996ff485dSAdrian Chadd static void 401096ff485dSAdrian Chadd ath_rx_proc(struct ath_softc *sc, int resched) 40115591b213SSam Leffler { 40128cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 4013c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 4014c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 40155591b213SSam Leffler struct ath_buf *bf; 4016fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 4017b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 40185591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 40195591b213SSam Leffler struct ath_desc *ds; 402065f9edeeSSam Leffler struct ath_rx_status *rs; 40215591b213SSam Leffler struct mbuf *m; 40220a915fadSSam Leffler struct ieee80211_node *ni; 4023d7736e13SSam Leffler int len, type, ngood; 40245591b213SSam Leffler HAL_STATUS status; 40257b0c77ecSSam Leffler int16_t nf; 402606fc4a10SAdrian Chadd u_int64_t tsf, rstamp; 40278f939e79SAdrian Chadd int npkts = 0; 40285591b213SSam Leffler 4029ef27340cSAdrian Chadd /* XXX we must not hold the ATH_LOCK here */ 4030ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 4031ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 4032ef27340cSAdrian Chadd 4033ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4034ef27340cSAdrian Chadd sc->sc_rxproc_cnt++; 4035ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4036ef27340cSAdrian Chadd 403796ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 4038d7736e13SSam Leffler ngood = 0; 403959efa8b5SSam Leffler nf = ath_hal_getchannoise(ah, sc->sc_curchan); 404084784be1SSam Leffler sc->sc_stats.ast_rx_noise = nf; 40417b0c77ecSSam Leffler tsf = ath_hal_gettsf64(ah); 40425591b213SSam Leffler do { 40436b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 40448a2a6beeSAdrian Chadd if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4045c42a7b7eSSam Leffler if_printf(ifp, 
"%s: no buffer!\n", __func__); 40465591b213SSam Leffler break; 40478a2a6beeSAdrian Chadd } else if (bf == NULL) { 40488a2a6beeSAdrian Chadd /* 40498a2a6beeSAdrian Chadd * End of List: 40508a2a6beeSAdrian Chadd * this can happen for non-self-linked RX chains 40518a2a6beeSAdrian Chadd */ 40528a2a6beeSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 40538a2a6beeSAdrian Chadd break; 40545591b213SSam Leffler } 4055b2792ff6SSam Leffler m = bf->bf_m; 4056b2792ff6SSam Leffler if (m == NULL) { /* NB: shouldn't happen */ 4057b2792ff6SSam Leffler /* 4058b2792ff6SSam Leffler * If mbuf allocation failed previously there 4059b2792ff6SSam Leffler * will be no mbuf; try again to re-populate it. 4060b2792ff6SSam Leffler */ 4061b2792ff6SSam Leffler /* XXX make debug msg */ 4062b2792ff6SSam Leffler if_printf(ifp, "%s: no mbuf!\n", __func__); 40636b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4064b2792ff6SSam Leffler goto rx_next; 4065b2792ff6SSam Leffler } 406604e22a02SSam Leffler ds = bf->bf_desc; 406704e22a02SSam Leffler if (ds->ds_link == bf->bf_daddr) { 406804e22a02SSam Leffler /* NB: never process the self-linked entry at the end */ 4069f77057dbSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 407004e22a02SSam Leffler break; 407104e22a02SSam Leffler } 40728cec0ab9SSam Leffler /* XXX sync descriptor memory */ 40738cec0ab9SSam Leffler /* 40748cec0ab9SSam Leffler * Must provide the virtual address of the current 40758cec0ab9SSam Leffler * descriptor, the physical address, and the virtual 40768cec0ab9SSam Leffler * address of the next descriptor in the h/w chain. 40778cec0ab9SSam Leffler * This allows the HAL to look ahead to see if the 40788cec0ab9SSam Leffler * hardware is done with a descriptor by checking the 40798cec0ab9SSam Leffler * done bit in the following descriptor and the address 40808cec0ab9SSam Leffler * of the current descriptor the DMA engine is working 40818cec0ab9SSam Leffler * on. All this is necessary because of our use of 40828cec0ab9SSam Leffler * a self-linked list to avoid rx overruns. 40838cec0ab9SSam Leffler */ 408465f9edeeSSam Leffler rs = &bf->bf_status.ds_rxstat; 40858cec0ab9SSam Leffler status = ath_hal_rxprocdesc(ah, ds, 408665f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4087a585a9a1SSam Leffler #ifdef ATH_DEBUG 4088c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 40896902009eSSam Leffler ath_printrxbuf(sc, bf, 0, status == HAL_OK); 40905591b213SSam Leffler #endif 40915591b213SSam Leffler if (status == HAL_EINPROGRESS) 40925591b213SSam Leffler break; 40936b349e5aSAdrian Chadd 40946b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 40958f939e79SAdrian Chadd npkts++; 4096f9aa1d90SAdrian Chadd 409706fc4a10SAdrian Chadd /* 409806fc4a10SAdrian Chadd * Calculate the correct 64 bit TSF given 409906fc4a10SAdrian Chadd * the TSF64 register value and rs_tstamp. 
410006fc4a10SAdrian Chadd */ 410106fc4a10SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 410206fc4a10SAdrian Chadd 4103f9aa1d90SAdrian Chadd /* These aren't specifically errors */ 41046e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4105f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_GI) 4106f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_halfgi++; 4107f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_2040) 4108f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_2040++; 4109f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4110f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_pre_crc_err++; 4111f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4112f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_post_crc_err++; 4113f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4114f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_decrypt_busy_err++; 4115f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4116f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_hi_rx_chain++; 41176e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4118f9aa1d90SAdrian Chadd 411968e8e04eSSam Leffler if (rs->rs_status != 0) { 412065f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 41215591b213SSam Leffler sc->sc_stats.ast_rx_crcerr++; 412265f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_FIFO) 41235591b213SSam Leffler sc->sc_stats.ast_rx_fifoerr++; 412465f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_PHY) { 41255591b213SSam Leffler sc->sc_stats.ast_rx_phyerr++; 412648237774SAdrian Chadd /* Process DFS radar events */ 4127373815efSAdrian Chadd if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4128373815efSAdrian Chadd (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4129373815efSAdrian Chadd /* Since we're touching the frame data, sync it */ 4130373815efSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, 4131373815efSAdrian Chadd bf->bf_dmamap, 4132373815efSAdrian Chadd BUS_DMASYNC_POSTREAD); 4133373815efSAdrian Chadd /* Now pass it to the radar processing code */ 413406fc4a10SAdrian Chadd ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4135373815efSAdrian Chadd } 413648237774SAdrian Chadd 4137f9aa1d90SAdrian Chadd /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 4138f9aa1d90SAdrian Chadd if (rs->rs_phyerr < 64) 4139f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 414068e8e04eSSam Leffler goto rx_error; /* NB: don't count in ierrors */ 4141c42a7b7eSSam Leffler } 414265f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_DECRYPT) { 414385643802SSam Leffler /* 4144c42a7b7eSSam Leffler * Decrypt error. If the error occurred 4145c42a7b7eSSam Leffler * because there was no hardware key, then 4146c42a7b7eSSam Leffler * let the frame through so the upper layers 4147c42a7b7eSSam Leffler * can process it. This is necessary for 5210 4148c42a7b7eSSam Leffler * parts which have no way to setup a ``clear'' 4149c42a7b7eSSam Leffler * key cache entry. 4150c42a7b7eSSam Leffler * 4151c42a7b7eSSam Leffler * XXX do key cache faulting 415285643802SSam Leffler */ 415365f9edeeSSam Leffler if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4154c42a7b7eSSam Leffler goto rx_accept; 4155c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badcrypt++; 41565591b213SSam Leffler } 415765f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_MIC) { 4158c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badmic++; 4159c42a7b7eSSam Leffler /* 4160c42a7b7eSSam Leffler * Do minimal work required to hand off 41615463c4a4SSam Leffler * the 802.11 header for notification. 
4162c42a7b7eSSam Leffler */
4163c42a7b7eSSam Leffler /* XXX frags and qos frames */
416465f9edeeSSam Leffler len = rs->rs_datalen;
4165c42a7b7eSSam Leffler if (len >= sizeof (struct ieee80211_frame)) {
4166c42a7b7eSSam Leffler bus_dmamap_sync(sc->sc_dmat,
4167c42a7b7eSSam Leffler bf->bf_dmamap,
4168c42a7b7eSSam Leffler BUS_DMASYNC_POSTREAD);
4169b032f27cSSam Leffler ath_handle_micerror(ic,
4170c42a7b7eSSam Leffler mtod(m, struct ieee80211_frame *),
41710ab4040aSSam Leffler sc->sc_splitmic ?
4172b032f27cSSam Leffler rs->rs_keyix-32 : rs->rs_keyix);
4173c42a7b7eSSam Leffler }
4174c42a7b7eSSam Leffler }
4175c42a7b7eSSam Leffler ifp->if_ierrors++;
417668e8e04eSSam Leffler rx_error:
417768e8e04eSSam Leffler /*
417868e8e04eSSam Leffler * Cleanup any pending partial frame.
417968e8e04eSSam Leffler */
418068e8e04eSSam Leffler if (sc->sc_rxpending != NULL) {
418168e8e04eSSam Leffler m_freem(sc->sc_rxpending);
418268e8e04eSSam Leffler sc->sc_rxpending = NULL;
418368e8e04eSSam Leffler }
4184c42a7b7eSSam Leffler /*
41857b0c77ecSSam Leffler * When a tap is present pass error frames
41867b0c77ecSSam Leffler * that have been requested. By default we
41877b0c77ecSSam Leffler * pass decrypt+mic errors but others may be
41887b0c77ecSSam Leffler * interesting (e.g. crc).
4189c42a7b7eSSam Leffler */
41905463c4a4SSam Leffler if (ieee80211_radiotap_active(ic) &&
419165f9edeeSSam Leffler (rs->rs_status & sc->sc_monpass)) {
41927b0c77ecSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
41937b0c77ecSSam Leffler BUS_DMASYNC_POSTREAD);
41947b0c77ecSSam Leffler /* NB: bpf needs the mbuf length setup */
419565f9edeeSSam Leffler len = rs->rs_datalen;
41967b0c77ecSSam Leffler m->m_pkthdr.len = m->m_len = len;
4197dcfd99a7SAdrian Chadd bf->bf_m = NULL;
419806fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf);
41995463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m);
4200dcfd99a7SAdrian Chadd m_freem(m);
42017b0c77ecSSam Leffler }
42027b0c77ecSSam Leffler /* XXX pass MIC errors up for s/w recalculation */
42035591b213SSam Leffler goto rx_next;
42045591b213SSam Leffler }
4205c42a7b7eSSam Leffler rx_accept:
4206c42a7b7eSSam Leffler /*
4207c42a7b7eSSam Leffler * Sync and unmap the frame. At this point we're
4208c42a7b7eSSam Leffler * committed to passing the mbuf somewhere so clear
4209c66c48cbSSam Leffler * bf_m; this means a new mbuf must be allocated
4210c42a7b7eSSam Leffler * when the rx descriptor is setup again to receive
4211c42a7b7eSSam Leffler * another frame.
4212c42a7b7eSSam Leffler */
42135591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
42145591b213SSam Leffler BUS_DMASYNC_POSTREAD);
42155591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
42165591b213SSam Leffler bf->bf_m = NULL;
4217c42a7b7eSSam Leffler
421865f9edeeSSam Leffler len = rs->rs_datalen;
421968e8e04eSSam Leffler m->m_len = len;
422068e8e04eSSam Leffler
422168e8e04eSSam Leffler if (rs->rs_more) {
422268e8e04eSSam Leffler /*
422368e8e04eSSam Leffler * Frame spans multiple descriptors; save
422468e8e04eSSam Leffler * it for the next completed descriptor, it
422568e8e04eSSam Leffler * will be used to construct a jumbogram.
422668e8e04eSSam Leffler */
422768e8e04eSSam Leffler if (sc->sc_rxpending != NULL) {
422868e8e04eSSam Leffler /* NB: max frame size is currently 2 clusters */
422968e8e04eSSam Leffler sc->sc_stats.ast_rx_toobig++;
423068e8e04eSSam Leffler m_freem(sc->sc_rxpending);
423168e8e04eSSam Leffler }
423268e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp;
423368e8e04eSSam Leffler m->m_pkthdr.len = len;
423468e8e04eSSam Leffler sc->sc_rxpending = m;
423568e8e04eSSam Leffler goto rx_next;
423668e8e04eSSam Leffler } else if (sc->sc_rxpending != NULL) {
423768e8e04eSSam Leffler /*
423868e8e04eSSam Leffler * This is the second part of a jumbogram,
423968e8e04eSSam Leffler * chain it to the first mbuf, adjust the
424068e8e04eSSam Leffler * frame length, and clear the rxpending state.
424168e8e04eSSam Leffler */
424268e8e04eSSam Leffler sc->sc_rxpending->m_next = m;
424368e8e04eSSam Leffler sc->sc_rxpending->m_pkthdr.len += len;
424468e8e04eSSam Leffler m = sc->sc_rxpending;
424568e8e04eSSam Leffler sc->sc_rxpending = NULL;
424668e8e04eSSam Leffler } else {
424768e8e04eSSam Leffler /*
424868e8e04eSSam Leffler * Normal single-descriptor receive; setup
424968e8e04eSSam Leffler * the rcvif and packet length.
425068e8e04eSSam Leffler */
425168e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp;
425268e8e04eSSam Leffler m->m_pkthdr.len = len;
425368e8e04eSSam Leffler }
425473454c73SSam Leffler
4255197d53c5SAdrian Chadd /*
4256197d53c5SAdrian Chadd * Validate rs->rs_antenna.
4257197d53c5SAdrian Chadd *
4258197d53c5SAdrian Chadd * Some users w/ AR9285 NICs have reported crashes
4259197d53c5SAdrian Chadd * here because rs_antenna field is bogusly large.
4260197d53c5SAdrian Chadd * Let's enforce the maximum antenna limit of 8
4261197d53c5SAdrian Chadd * (and it shouldn't be hard coded, but that's a
4262197d53c5SAdrian Chadd * separate problem) and if there's an issue, print
4263197d53c5SAdrian Chadd * out an error and adjust rs_antenna to something
4264197d53c5SAdrian Chadd * sensible.
4265197d53c5SAdrian Chadd *
4266197d53c5SAdrian Chadd * This code should be removed once the actual
4267197d53c5SAdrian Chadd * root cause of the issue has been identified.
4268197d53c5SAdrian Chadd * For example, it may be that the rs_antenna
4269197d53c5SAdrian Chadd * field is only valid for the last frame of
4270197d53c5SAdrian Chadd * an aggregate and it just happens that it is
4271197d53c5SAdrian Chadd * "mostly" right. (This is a general statement -
4272197d53c5SAdrian Chadd * the majority of the statistics are only valid
4273197d53c5SAdrian Chadd * for the last frame in an aggregate.)
4274197d53c5SAdrian Chadd */
4275197d53c5SAdrian Chadd if (rs->rs_antenna > 7) {
4276197d53c5SAdrian Chadd device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
4277197d53c5SAdrian Chadd __func__, rs->rs_antenna);
4278197d53c5SAdrian Chadd #ifdef ATH_DEBUG
4279197d53c5SAdrian Chadd ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4280197d53c5SAdrian Chadd #endif /* ATH_DEBUG */
4281197d53c5SAdrian Chadd rs->rs_antenna = 0; /* XXX better than nothing */
4282197d53c5SAdrian Chadd }
4283197d53c5SAdrian Chadd
4284b032f27cSSam Leffler ifp->if_ipackets++;
428565f9edeeSSam Leffler sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4286c42a7b7eSSam Leffler
42875463c4a4SSam Leffler /*
42885463c4a4SSam Leffler * Populate the rx status block. When there are bpf
42895463c4a4SSam Leffler * listeners we do the additional work to provide
42905463c4a4SSam Leffler * complete status. Otherwise we fill in only the
42915463c4a4SSam Leffler * material required by ieee80211_input.
Note that 42925463c4a4SSam Leffler * noise setting is filled in above. 42935463c4a4SSam Leffler */ 42945463c4a4SSam Leffler if (ieee80211_radiotap_active(ic)) 429506fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf); 42960a915fadSSam Leffler 42975591b213SSam Leffler /* 4298c42a7b7eSSam Leffler * From this point on we assume the frame is at least 4299c42a7b7eSSam Leffler * as large as ieee80211_frame_min; verify that. 43005591b213SSam Leffler */ 4301c42a7b7eSSam Leffler if (len < IEEE80211_MIN_LEN) { 43025463c4a4SSam Leffler if (!ieee80211_radiotap_active(ic)) { 43035463c4a4SSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 43045463c4a4SSam Leffler "%s: short packet %d\n", __func__, len); 4305c42a7b7eSSam Leffler sc->sc_stats.ast_rx_tooshort++; 43065463c4a4SSam Leffler } else { 43075463c4a4SSam Leffler /* NB: in particular this captures ack's */ 43085463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m); 43095463c4a4SSam Leffler } 4310c42a7b7eSSam Leffler m_freem(m); 4311c42a7b7eSSam Leffler goto rx_next; 43125591b213SSam Leffler } 43130a915fadSSam Leffler 4314c42a7b7eSSam Leffler if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 431546d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 431646d4d74cSSam Leffler uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 431746d4d74cSSam Leffler 431868e8e04eSSam Leffler ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 431946d4d74cSSam Leffler sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4320c42a7b7eSSam Leffler } 4321c42a7b7eSSam Leffler 4322c42a7b7eSSam Leffler m_adj(m, -IEEE80211_CRC_LEN); 4323de5af704SSam Leffler 4324de5af704SSam Leffler /* 4325c42a7b7eSSam Leffler * Locate the node for sender, track state, and then 4326c42a7b7eSSam Leffler * pass the (referenced) node up to the 802.11 layer 4327c42a7b7eSSam Leffler * for its use. 4328c42a7b7eSSam Leffler */ 4329c1225b52SSam Leffler ni = ieee80211_find_rxnode_withkey(ic, 4330c1225b52SSam Leffler mtod(m, const struct ieee80211_frame_min *), 433165f9edeeSSam Leffler rs->rs_keyix == HAL_RXKEYIX_INVALID ? 433265f9edeeSSam Leffler IEEE80211_KEYIX_NONE : rs->rs_keyix); 43337041d50cSBernhard Schmidt sc->sc_lastrs = rs; 4334a07e9ddbSAdrian Chadd 43356e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4336a07e9ddbSAdrian Chadd if (rs->rs_isaggr) 4337a07e9ddbSAdrian Chadd sc->sc_stats.ast_rx_agg++; 43386e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4339a07e9ddbSAdrian Chadd 4340a07e9ddbSAdrian Chadd if (ni != NULL) { 4341b032f27cSSam Leffler /* 4342e57539afSAdrian Chadd * Only punt packets for ampdu reorder processing for 4343e57539afSAdrian Chadd * 11n nodes; net80211 enforces that M_AMPDU is only 4344e57539afSAdrian Chadd * set for 11n nodes. 434500fc8705SAdrian Chadd */ 434600fc8705SAdrian Chadd if (ni->ni_flags & IEEE80211_NODE_HT) 434700fc8705SAdrian Chadd m->m_flags |= M_AMPDU; 434800fc8705SAdrian Chadd 434900fc8705SAdrian Chadd /* 4350b032f27cSSam Leffler * Sending station is known, dispatch directly. 4351b032f27cSSam Leffler */ 43525463c4a4SSam Leffler type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4353b032f27cSSam Leffler ieee80211_free_node(ni); 4354b032f27cSSam Leffler /* 4355b032f27cSSam Leffler * Arrange to update the last rx timestamp only for 4356b032f27cSSam Leffler * frames from our ap when operating in station mode. 4357b032f27cSSam Leffler * This assumes the rx key is always setup when 4358b032f27cSSam Leffler * associated. 
4359b032f27cSSam Leffler */ 4360b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && 4361b032f27cSSam Leffler rs->rs_keyix != HAL_RXKEYIX_INVALID) 4362b032f27cSSam Leffler ngood++; 4363b032f27cSSam Leffler } else { 43645463c4a4SSam Leffler type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4365b032f27cSSam Leffler } 4366c42a7b7eSSam Leffler /* 4367c42a7b7eSSam Leffler * Track rx rssi and do any rx antenna management. 4368de5af704SSam Leffler */ 436965f9edeeSSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4370c42a7b7eSSam Leffler if (sc->sc_diversity) { 4371c42a7b7eSSam Leffler /* 4372c42a7b7eSSam Leffler * When using fast diversity, change the default rx 4373c42a7b7eSSam Leffler * antenna if diversity chooses the other antenna 3 4374c42a7b7eSSam Leffler * times in a row. 4375c42a7b7eSSam Leffler */ 437665f9edeeSSam Leffler if (sc->sc_defant != rs->rs_antenna) { 4377c42a7b7eSSam Leffler if (++sc->sc_rxotherant >= 3) 437865f9edeeSSam Leffler ath_setdefantenna(sc, rs->rs_antenna); 4379c42a7b7eSSam Leffler } else 4380c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 4381c42a7b7eSSam Leffler } 4382235ab70eSAdrian Chadd 4383235ab70eSAdrian Chadd /* Newer school diversity - kite specific for now */ 4384235ab70eSAdrian Chadd /* XXX perhaps migrate the normal diversity code to this? */ 4385235ab70eSAdrian Chadd if ((ah)->ah_rxAntCombDiversity) 4386235ab70eSAdrian Chadd (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4387235ab70eSAdrian Chadd 43883e50ec2cSSam Leffler if (sc->sc_softled) { 43893e50ec2cSSam Leffler /* 43903e50ec2cSSam Leffler * Blink for any data frame. Otherwise do a 43913e50ec2cSSam Leffler * heartbeat-style blink when idle. The latter 43923e50ec2cSSam Leffler * is mainly for station mode where we depend on 43933e50ec2cSSam Leffler * periodic beacon frames to trigger the poll event. 43943e50ec2cSSam Leffler */ 439531640eb7SSam Leffler if (type == IEEE80211_FC0_TYPE_DATA) { 439646d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 439746d4d74cSSam Leffler ath_led_event(sc, 439846d4d74cSSam Leffler rt->rateCodeToIndex[rs->rs_rate]); 43993e50ec2cSSam Leffler } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 440046d4d74cSSam Leffler ath_led_event(sc, 0); 44013e50ec2cSSam Leffler } 44025591b213SSam Leffler rx_next: 44036b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 44045591b213SSam Leffler } while (ath_rxbuf_init(sc, bf) == 0); 44055591b213SSam Leffler 4406c42a7b7eSSam Leffler /* rx signal state monitoring */ 440759efa8b5SSam Leffler ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4408d7736e13SSam Leffler if (ngood) 4409d7736e13SSam Leffler sc->sc_lastrx = tsf; 4410b5f4adb3SSam Leffler 4411f52d3452SAdrian Chadd CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 441248237774SAdrian Chadd /* Queue DFS tasklet if needed */ 441396ff485dSAdrian Chadd if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 441448237774SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 441548237774SAdrian Chadd 44161fdadc0fSAdrian Chadd /* 44171fdadc0fSAdrian Chadd * Now that all the RX frames were handled that 44181fdadc0fSAdrian Chadd * need to be handled, kick the PCU if there's 44191fdadc0fSAdrian Chadd * been an RXEOL condition. 
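 *
 * The restart below reprograms the rx descriptor pointer
 * (putrxbuf), re-enables rx DMA (rxena), restores the rx filter
 * (ath_mode_init) and restarts the PCU receive engine before the
 * interrupt mask is restored.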
44201fdadc0fSAdrian Chadd */ 4421ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 442296ff485dSAdrian Chadd if (resched && sc->sc_kickpcu) { 4423f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 44248f939e79SAdrian Chadd device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 44258f939e79SAdrian Chadd __func__, npkts); 44268f939e79SAdrian Chadd 44278f939e79SAdrian Chadd /* XXX rxslink? */ 4428ef27340cSAdrian Chadd /* 4429ef27340cSAdrian Chadd * XXX can we hold the PCU lock here? 4430ef27340cSAdrian Chadd * Are there any net80211 buffer calls involved? 4431ef27340cSAdrian Chadd */ 44328f939e79SAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 44338f939e79SAdrian Chadd ath_hal_putrxbuf(ah, bf->bf_daddr); 44348f939e79SAdrian Chadd ath_hal_rxena(ah); /* enable recv descriptors */ 44358f939e79SAdrian Chadd ath_mode_init(sc); /* set filters, etc. */ 44368f939e79SAdrian Chadd ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 44378f939e79SAdrian Chadd 44381fdadc0fSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 44398f939e79SAdrian Chadd sc->sc_kickpcu = 0; 44401fdadc0fSAdrian Chadd } 4441ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 44421fdadc0fSAdrian Chadd 4443ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 444496ff485dSAdrian Chadd if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 4445339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 444604f19fd6SSam Leffler ieee80211_ff_age_all(ic, 100); 4447339ccfb3SSam Leffler #endif 4448339ccfb3SSam Leffler if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4449cd196bb2SSam Leffler ath_start(ifp); 4450339ccfb3SSam Leffler } 44518cec0ab9SSam Leffler #undef PA2DESC 4452ef27340cSAdrian Chadd 4453ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4454ef27340cSAdrian Chadd sc->sc_rxproc_cnt--; 4455ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 44565591b213SSam Leffler } 44575591b213SSam Leffler 4458622b3fd2SSam Leffler static void 4459622b3fd2SSam Leffler ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4460622b3fd2SSam Leffler { 4461622b3fd2SSam Leffler txq->axq_qnum = qnum; 4462339ccfb3SSam Leffler txq->axq_ac = 0; 4463622b3fd2SSam Leffler txq->axq_depth = 0; 446416d4de92SAdrian Chadd txq->axq_aggr_depth = 0; 4465622b3fd2SSam Leffler txq->axq_intrcnt = 0; 4466622b3fd2SSam Leffler txq->axq_link = NULL; 44676b349e5aSAdrian Chadd txq->axq_softc = sc; 44686b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_q); 44696b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_tidq); 4470622b3fd2SSam Leffler ATH_TXQ_LOCK_INIT(sc, txq); 4471622b3fd2SSam Leffler } 4472622b3fd2SSam Leffler 44735591b213SSam Leffler /* 4474c42a7b7eSSam Leffler * Setup a h/w transmit queue. 44755591b213SSam Leffler */ 4476c42a7b7eSSam Leffler static struct ath_txq * 4477c42a7b7eSSam Leffler ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4478c42a7b7eSSam Leffler { 4479c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4480c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4481c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4482c42a7b7eSSam Leffler int qnum; 4483c42a7b7eSSam Leffler 4484c42a7b7eSSam Leffler memset(&qi, 0, sizeof(qi)); 4485c42a7b7eSSam Leffler qi.tqi_subtype = subtype; 4486c42a7b7eSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4487c42a7b7eSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4488c42a7b7eSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4489c42a7b7eSSam Leffler /* 4490c42a7b7eSSam Leffler * Enable interrupts only for EOL and DESC conditions. 
4491c42a7b7eSSam Leffler * We mark tx descriptors to receive a DESC interrupt 4492c42a7b7eSSam Leffler * when a tx queue gets deep; otherwise waiting for the 4493c42a7b7eSSam Leffler * EOL to reap descriptors. Note that this is done to 4494c42a7b7eSSam Leffler * reduce interrupt load and this only defers reaping 4495c42a7b7eSSam Leffler * descriptors, never transmitting frames. Aside from 4496c42a7b7eSSam Leffler * reducing interrupts this also permits more concurrency. 4497c42a7b7eSSam Leffler * The only potential downside is if the tx queue backs 4498c42a7b7eSSam Leffler * up in which case the top half of the kernel may backup 4499c42a7b7eSSam Leffler * due to a lack of tx descriptors. 4500c42a7b7eSSam Leffler */ 4501bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 4502c42a7b7eSSam Leffler qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4503c42a7b7eSSam Leffler if (qnum == -1) { 4504c42a7b7eSSam Leffler /* 4505c42a7b7eSSam Leffler * NB: don't print a message, this happens 4506a614e076SSam Leffler * normally on parts with too few tx queues 4507c42a7b7eSSam Leffler */ 4508c42a7b7eSSam Leffler return NULL; 4509c42a7b7eSSam Leffler } 4510c42a7b7eSSam Leffler if (qnum >= N(sc->sc_txq)) { 45116891c875SPeter Wemm device_printf(sc->sc_dev, 45126891c875SPeter Wemm "hal qnum %u out of range, max %zu!\n", 4513c42a7b7eSSam Leffler qnum, N(sc->sc_txq)); 4514c42a7b7eSSam Leffler ath_hal_releasetxqueue(ah, qnum); 4515c42a7b7eSSam Leffler return NULL; 4516c42a7b7eSSam Leffler } 4517c42a7b7eSSam Leffler if (!ATH_TXQ_SETUP(sc, qnum)) { 4518622b3fd2SSam Leffler ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4519c42a7b7eSSam Leffler sc->sc_txqsetup |= 1<<qnum; 4520c42a7b7eSSam Leffler } 4521c42a7b7eSSam Leffler return &sc->sc_txq[qnum]; 4522c42a7b7eSSam Leffler #undef N 4523c42a7b7eSSam Leffler } 4524c42a7b7eSSam Leffler 4525c42a7b7eSSam Leffler /* 4526c42a7b7eSSam Leffler * Setup a hardware data transmit queue for the specified 4527c42a7b7eSSam Leffler * access control. The hal may not support all requested 4528c42a7b7eSSam Leffler * queues in which case it will return a reference to a 4529c42a7b7eSSam Leffler * previously setup queue. We record the mapping from ac's 4530c42a7b7eSSam Leffler * to h/w queues for use by ath_tx_start and also track 4531c42a7b7eSSam Leffler * the set of h/w queues being used to optimize work in the 4532c42a7b7eSSam Leffler * transmit interrupt handler and related routines. 4533c42a7b7eSSam Leffler */ 4534c42a7b7eSSam Leffler static int 4535c42a7b7eSSam Leffler ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4536c42a7b7eSSam Leffler { 4537c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4538c42a7b7eSSam Leffler struct ath_txq *txq; 4539c42a7b7eSSam Leffler 4540c42a7b7eSSam Leffler if (ac >= N(sc->sc_ac2q)) { 45416891c875SPeter Wemm device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4542c42a7b7eSSam Leffler ac, N(sc->sc_ac2q)); 4543c42a7b7eSSam Leffler return 0; 4544c42a7b7eSSam Leffler } 4545c42a7b7eSSam Leffler txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4546c42a7b7eSSam Leffler if (txq != NULL) { 4547339ccfb3SSam Leffler txq->axq_ac = ac; 4548c42a7b7eSSam Leffler sc->sc_ac2q[ac] = txq; 4549c42a7b7eSSam Leffler return 1; 4550c42a7b7eSSam Leffler } else 4551c42a7b7eSSam Leffler return 0; 4552c42a7b7eSSam Leffler #undef N 4553c42a7b7eSSam Leffler } 4554c42a7b7eSSam Leffler 4555c42a7b7eSSam Leffler /* 4556c42a7b7eSSam Leffler * Update WME parameters for a transmit queue. 
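 *
 * The two conversion macros below do the unit handling: a WME
 * logcwmin/logcwmax exponent e becomes a contention window of
 * (1 << e) - 1 slots (an assumed wmep_logcwmin of 4 gives a cwmin
 * of 15), and the TXOP limit, specified in 32us units, is shifted
 * left by 5 to obtain microseconds (e.g. 94 -> 3008us).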
4557c42a7b7eSSam Leffler */ 4558c42a7b7eSSam Leffler static int 4559c42a7b7eSSam Leffler ath_txq_update(struct ath_softc *sc, int ac) 4560c42a7b7eSSam Leffler { 4561c42a7b7eSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4562c42a7b7eSSam Leffler #define ATH_TXOP_TO_US(v) (v<<5) 4563b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 4564b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 4565c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[ac]; 4566c42a7b7eSSam Leffler struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4567c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4568c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4569c42a7b7eSSam Leffler 4570c42a7b7eSSam Leffler ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4571584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 457210ad9a77SSam Leffler if (sc->sc_tdma) { 457310ad9a77SSam Leffler /* 457410ad9a77SSam Leffler * AIFS is zero so there's no pre-transmit wait. The 457510ad9a77SSam Leffler * burst time defines the slot duration and is configured 457609be6601SSam Leffler * through net80211. The QCU is setup to not do post-xmit 457710ad9a77SSam Leffler * back off, lockout all lower-priority QCU's, and fire 457810ad9a77SSam Leffler * off the DMA beacon alert timer which is setup based 457910ad9a77SSam Leffler * on the slot configuration. 458010ad9a77SSam Leffler */ 458110ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 458210ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 458310ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 458410ad9a77SSam Leffler | HAL_TXQ_TXEOLINT_ENABLE 458510ad9a77SSam Leffler | HAL_TXQ_DBA_GATED 458610ad9a77SSam Leffler | HAL_TXQ_BACKOFF_DISABLE 458710ad9a77SSam Leffler | HAL_TXQ_ARB_LOCKOUT_GLOBAL 458810ad9a77SSam Leffler ; 458910ad9a77SSam Leffler qi.tqi_aifs = 0; 459010ad9a77SSam Leffler /* XXX +dbaprep? */ 459110ad9a77SSam Leffler qi.tqi_readyTime = sc->sc_tdmaslotlen; 459210ad9a77SSam Leffler qi.tqi_burstTime = qi.tqi_readyTime; 459310ad9a77SSam Leffler } else { 459410ad9a77SSam Leffler #endif 459516d4de92SAdrian Chadd /* 459616d4de92SAdrian Chadd * XXX shouldn't this just use the default flags 459716d4de92SAdrian Chadd * used in the previous queue setup? 
459816d4de92SAdrian Chadd */ 459910ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 460010ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 460110ad9a77SSam Leffler | HAL_TXQ_TXDESCINT_ENABLE 460210ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 46031f25c0f7SAdrian Chadd | HAL_TXQ_TXEOLINT_ENABLE 460410ad9a77SSam Leffler ; 4605c42a7b7eSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 4606c42a7b7eSSam Leffler qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4607c42a7b7eSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 460810ad9a77SSam Leffler qi.tqi_readyTime = 0; 4609c42a7b7eSSam Leffler qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4610584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 461110ad9a77SSam Leffler } 461210ad9a77SSam Leffler #endif 461310ad9a77SSam Leffler 461410ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 461510ad9a77SSam Leffler "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 461610ad9a77SSam Leffler __func__, txq->axq_qnum, qi.tqi_qflags, 461710ad9a77SSam Leffler qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4618c42a7b7eSSam Leffler 4619c42a7b7eSSam Leffler if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4620b032f27cSSam Leffler if_printf(ifp, "unable to update hardware queue " 4621c42a7b7eSSam Leffler "parameters for %s traffic!\n", 4622c42a7b7eSSam Leffler ieee80211_wme_acnames[ac]); 4623c42a7b7eSSam Leffler return 0; 4624c42a7b7eSSam Leffler } else { 4625c42a7b7eSSam Leffler ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4626c42a7b7eSSam Leffler return 1; 4627c42a7b7eSSam Leffler } 4628c42a7b7eSSam Leffler #undef ATH_TXOP_TO_US 4629c42a7b7eSSam Leffler #undef ATH_EXPONENT_TO_VALUE 4630c42a7b7eSSam Leffler } 4631c42a7b7eSSam Leffler 4632c42a7b7eSSam Leffler /* 4633c42a7b7eSSam Leffler * Callback from the 802.11 layer to update WME parameters. 4634c42a7b7eSSam Leffler */ 4635c42a7b7eSSam Leffler static int 4636c42a7b7eSSam Leffler ath_wme_update(struct ieee80211com *ic) 4637c42a7b7eSSam Leffler { 4638c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 4639c42a7b7eSSam Leffler 4640c42a7b7eSSam Leffler return !ath_txq_update(sc, WME_AC_BE) || 4641c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_BK) || 4642c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VI) || 4643c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4644c42a7b7eSSam Leffler } 4645c42a7b7eSSam Leffler 4646c42a7b7eSSam Leffler /* 4647c42a7b7eSSam Leffler * Reclaim resources for a setup queue. 4648c42a7b7eSSam Leffler */ 4649c42a7b7eSSam Leffler static void 4650c42a7b7eSSam Leffler ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4651c42a7b7eSSam Leffler { 4652c42a7b7eSSam Leffler 4653c42a7b7eSSam Leffler ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4654c42a7b7eSSam Leffler ATH_TXQ_LOCK_DESTROY(txq); 4655c42a7b7eSSam Leffler sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4656c42a7b7eSSam Leffler } 4657c42a7b7eSSam Leffler 4658c42a7b7eSSam Leffler /* 4659c42a7b7eSSam Leffler * Reclaim all tx queue resources. 
4660c42a7b7eSSam Leffler */ 4661c42a7b7eSSam Leffler static void 4662c42a7b7eSSam Leffler ath_tx_cleanup(struct ath_softc *sc) 4663c42a7b7eSSam Leffler { 4664c42a7b7eSSam Leffler int i; 4665c42a7b7eSSam Leffler 4666c42a7b7eSSam Leffler ATH_TXBUF_LOCK_DESTROY(sc); 4667c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4668c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 4669c42a7b7eSSam Leffler ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4670c42a7b7eSSam Leffler } 46715591b213SSam Leffler 467299d258fdSSam Leffler /* 4673ab06fdf2SSam Leffler * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4674ab06fdf2SSam Leffler * using the current rates in sc_rixmap. 46758b5341deSSam Leffler */ 4676b8e788a5SAdrian Chadd int 4677ab06fdf2SSam Leffler ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 46788b5341deSSam Leffler { 4679ab06fdf2SSam Leffler int rix = sc->sc_rixmap[rate]; 4680ab06fdf2SSam Leffler /* NB: return lowest rix for invalid rate */ 4681ab06fdf2SSam Leffler return (rix == 0xff ? 0 : rix); 46828b5341deSSam Leffler } 46838b5341deSSam Leffler 46849352fb7aSAdrian Chadd static void 46859352fb7aSAdrian Chadd ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 46869352fb7aSAdrian Chadd struct ath_buf *bf) 46879352fb7aSAdrian Chadd { 46889352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 46899352fb7aSAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 46909352fb7aSAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 46919352fb7aSAdrian Chadd int sr, lr, pri; 46929352fb7aSAdrian Chadd 46939352fb7aSAdrian Chadd if (ts->ts_status == 0) { 46949352fb7aSAdrian Chadd u_int8_t txant = ts->ts_antenna; 46959352fb7aSAdrian Chadd sc->sc_stats.ast_ant_tx[txant]++; 46969352fb7aSAdrian Chadd sc->sc_ant_tx[txant]++; 46979352fb7aSAdrian Chadd if (ts->ts_finaltsi != 0) 46989352fb7aSAdrian Chadd sc->sc_stats.ast_tx_altrate++; 46999352fb7aSAdrian Chadd pri = M_WME_GETAC(bf->bf_m); 47009352fb7aSAdrian Chadd if (pri >= WME_AC_VO) 47019352fb7aSAdrian Chadd ic->ic_wme.wme_hipri_traffic++; 47029352fb7aSAdrian Chadd if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) 47039352fb7aSAdrian Chadd ni->ni_inact = ni->ni_inact_reload; 47049352fb7aSAdrian Chadd } else { 47059352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XRETRY) 47069352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xretries++; 47079352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FIFO) 47089352fb7aSAdrian Chadd sc->sc_stats.ast_tx_fifoerr++; 47099352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FILT) 47109352fb7aSAdrian Chadd sc->sc_stats.ast_tx_filtered++; 47119352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XTXOP) 47129352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xtxop++; 47139352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 47149352fb7aSAdrian Chadd sc->sc_stats.ast_tx_timerexpired++; 47159352fb7aSAdrian Chadd 47169352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 47179352fb7aSAdrian Chadd sc->sc_stats.ast_tx_data_underrun++; 47189352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 47199352fb7aSAdrian Chadd sc->sc_stats.ast_tx_delim_underrun++; 47209352fb7aSAdrian Chadd 47219352fb7aSAdrian Chadd if (bf->bf_m->m_flags & M_FF) 47229352fb7aSAdrian Chadd sc->sc_stats.ast_ff_txerr++; 47239352fb7aSAdrian Chadd } 47249352fb7aSAdrian Chadd /* XXX when is this valid? 
*/
47259352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
47269352fb7aSAdrian Chadd sc->sc_stats.ast_tx_desccfgerr++;
47279352fb7aSAdrian Chadd 
47289352fb7aSAdrian Chadd sr = ts->ts_shortretry;
47299352fb7aSAdrian Chadd lr = ts->ts_longretry;
47309352fb7aSAdrian Chadd sc->sc_stats.ast_tx_shortretry += sr;
47319352fb7aSAdrian Chadd sc->sc_stats.ast_tx_longretry += lr;
47329352fb7aSAdrian Chadd 
47339352fb7aSAdrian Chadd }
47349352fb7aSAdrian Chadd 
47359352fb7aSAdrian Chadd /*
47369352fb7aSAdrian Chadd * The default completion.  If fail is 1, this means
47379352fb7aSAdrian Chadd * "please don't retry the frame, and just return a -1 status
47389352fb7aSAdrian Chadd * to the net80211 stack".
47399352fb7aSAdrian Chadd */
47409352fb7aSAdrian Chadd void
47419352fb7aSAdrian Chadd ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
47429352fb7aSAdrian Chadd {
47439352fb7aSAdrian Chadd struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
47449352fb7aSAdrian Chadd int st;
47459352fb7aSAdrian Chadd 
47469352fb7aSAdrian Chadd if (fail == 1)
47479352fb7aSAdrian Chadd st = -1;
47489352fb7aSAdrian Chadd else
47499352fb7aSAdrian Chadd st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ?
47509352fb7aSAdrian Chadd ts->ts_status : HAL_TXERR_XRETRY;
47519352fb7aSAdrian Chadd 
47529352fb7aSAdrian Chadd if (bf->bf_state.bfs_dobaw)
47539352fb7aSAdrian Chadd device_printf(sc->sc_dev,
47549352fb7aSAdrian Chadd "%s: dobaw should've been cleared!\n", __func__);
47559352fb7aSAdrian Chadd if (bf->bf_next != NULL)
47569352fb7aSAdrian Chadd device_printf(sc->sc_dev,
47579352fb7aSAdrian Chadd "%s: bf_next not NULL!\n", __func__);
47589352fb7aSAdrian Chadd 
47599352fb7aSAdrian Chadd /*
47609352fb7aSAdrian Chadd * Do any tx complete callback. Note this must
47619352fb7aSAdrian Chadd * be done before releasing the node reference.
47629352fb7aSAdrian Chadd * This will free the mbuf, release the net80211
47639352fb7aSAdrian Chadd * node and recycle the ath_buf.
47649352fb7aSAdrian Chadd */
47659352fb7aSAdrian Chadd ath_tx_freebuf(sc, bf, st);
47669352fb7aSAdrian Chadd }
47679352fb7aSAdrian Chadd 
47689352fb7aSAdrian Chadd /*
4769eb6f0de0SAdrian Chadd * Update rate control with the given completion status.
4770eb6f0de0SAdrian Chadd */
4771eb6f0de0SAdrian Chadd void
4772eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4773eb6f0de0SAdrian Chadd struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4774eb6f0de0SAdrian Chadd int nframes, int nbad)
4775eb6f0de0SAdrian Chadd {
4776eb6f0de0SAdrian Chadd struct ath_node *an;
4777eb6f0de0SAdrian Chadd 
4778eb6f0de0SAdrian Chadd /* Only for unicast frames */
4779eb6f0de0SAdrian Chadd if (ni == NULL)
4780eb6f0de0SAdrian Chadd return;
4781eb6f0de0SAdrian Chadd 
4782eb6f0de0SAdrian Chadd an = ATH_NODE(ni);
4783eb6f0de0SAdrian Chadd 
4784eb6f0de0SAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4785eb6f0de0SAdrian Chadd ATH_NODE_LOCK(an);
4786eb6f0de0SAdrian Chadd ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4787eb6f0de0SAdrian Chadd ATH_NODE_UNLOCK(an);
4788eb6f0de0SAdrian Chadd }
4789eb6f0de0SAdrian Chadd }
4790eb6f0de0SAdrian Chadd 
4791eb6f0de0SAdrian Chadd /*
47929352fb7aSAdrian Chadd * Update the busy status of the last frame on the free list.
47939352fb7aSAdrian Chadd * When doing TDMA, the busy flag tracks whether the hardware 47949352fb7aSAdrian Chadd * currently points to this buffer or not, and thus gated DMA 47959352fb7aSAdrian Chadd * may restart by re-reading the last descriptor in this 47969352fb7aSAdrian Chadd * buffer. 47979352fb7aSAdrian Chadd * 47989352fb7aSAdrian Chadd * This should be called in the completion function once one 47999352fb7aSAdrian Chadd * of the buffers has been used. 48009352fb7aSAdrian Chadd */ 48019352fb7aSAdrian Chadd static void 48029352fb7aSAdrian Chadd ath_tx_update_busy(struct ath_softc *sc) 48039352fb7aSAdrian Chadd { 48049352fb7aSAdrian Chadd struct ath_buf *last; 48059352fb7aSAdrian Chadd 48069352fb7aSAdrian Chadd /* 48079352fb7aSAdrian Chadd * Since the last frame may still be marked 48089352fb7aSAdrian Chadd * as ATH_BUF_BUSY, unmark it here before 48099352fb7aSAdrian Chadd * finishing the frame processing. 48109352fb7aSAdrian Chadd * Since we've completed a frame (aggregate 48119352fb7aSAdrian Chadd * or otherwise), the hardware has moved on 48129352fb7aSAdrian Chadd * and is no longer referencing the previous 48139352fb7aSAdrian Chadd * descriptor. 48149352fb7aSAdrian Chadd */ 48159352fb7aSAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 48169352fb7aSAdrian Chadd last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 48179352fb7aSAdrian Chadd if (last != NULL) 48189352fb7aSAdrian Chadd last->bf_flags &= ~ATH_BUF_BUSY; 48199352fb7aSAdrian Chadd } 48209352fb7aSAdrian Chadd 48219352fb7aSAdrian Chadd 482268e8e04eSSam Leffler /* 4823c42a7b7eSSam Leffler * Process completed xmit descriptors from the specified queue. 4824eb6f0de0SAdrian Chadd * Kick the packet scheduler if needed. This can occur from this 4825eb6f0de0SAdrian Chadd * particular task. 4826c42a7b7eSSam Leffler */ 4827d7736e13SSam Leffler static int 482896ff485dSAdrian Chadd ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 48295591b213SSam Leffler { 48305591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 48319352fb7aSAdrian Chadd struct ath_buf *bf; 48326edf1dc7SAdrian Chadd struct ath_desc *ds; 483365f9edeeSSam Leffler struct ath_tx_status *ts; 48345591b213SSam Leffler struct ieee80211_node *ni; 4835eb6f0de0SAdrian Chadd struct ath_node *an; 48369352fb7aSAdrian Chadd int nacked; 48375591b213SSam Leffler HAL_STATUS status; 48385591b213SSam Leffler 4839c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4840c42a7b7eSSam Leffler __func__, txq->axq_qnum, 4841c42a7b7eSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4842c42a7b7eSSam Leffler txq->axq_link); 4843d7736e13SSam Leffler nacked = 0; 48445591b213SSam Leffler for (;;) { 4845c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 4846c42a7b7eSSam Leffler txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 48476b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 48485591b213SSam Leffler if (bf == NULL) { 4849c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 48505591b213SSam Leffler break; 48515591b213SSam Leffler } 48526edf1dc7SAdrian Chadd ds = bf->bf_lastds; /* XXX must be setup correctly! 
*/ 485365f9edeeSSam Leffler ts = &bf->bf_status.ds_txstat; 485465f9edeeSSam Leffler status = ath_hal_txprocdesc(ah, ds, ts); 4855a585a9a1SSam Leffler #ifdef ATH_DEBUG 4856c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 48576902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 48586902009eSSam Leffler status == HAL_OK); 48595591b213SSam Leffler #endif 48605591b213SSam Leffler if (status == HAL_EINPROGRESS) { 4861c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 48625591b213SSam Leffler break; 48635591b213SSam Leffler } 48646b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 4865584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 486610ad9a77SSam Leffler if (txq->axq_depth > 0) { 486710ad9a77SSam Leffler /* 486810ad9a77SSam Leffler * More frames follow. Mark the buffer busy 486910ad9a77SSam Leffler * so it's not re-used while the hardware may 487010ad9a77SSam Leffler * still re-read the link field in the descriptor. 48716edf1dc7SAdrian Chadd * 48726edf1dc7SAdrian Chadd * Use the last buffer in an aggregate as that 48736edf1dc7SAdrian Chadd * is where the hardware may be - intermediate 48746edf1dc7SAdrian Chadd * descriptors won't be "busy". 487510ad9a77SSam Leffler */ 48766edf1dc7SAdrian Chadd bf->bf_last->bf_flags |= ATH_BUF_BUSY; 487710ad9a77SSam Leffler } else 487810ad9a77SSam Leffler #else 4879ebecf802SSam Leffler if (txq->axq_depth == 0) 488010ad9a77SSam Leffler #endif 48811539af1eSSam Leffler txq->axq_link = NULL; 48826edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 48836edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 48845591b213SSam Leffler 48855591b213SSam Leffler ni = bf->bf_node; 4886c42a7b7eSSam Leffler /* 48879352fb7aSAdrian Chadd * If unicast frame was ack'd update RSSI, 488884784be1SSam Leffler * including the last rx time used to 488984784be1SSam Leffler * workaround phantom bmiss interrupts. 4890d7736e13SSam Leffler */ 48919352fb7aSAdrian Chadd if (ni != NULL && ts->ts_status == 0 && 48929352fb7aSAdrian Chadd ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) { 4893d7736e13SSam Leffler nacked++; 489484784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 489584784be1SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 489684784be1SSam Leffler ts->ts_rssi); 489784784be1SSam Leffler } 48989352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 48999352fb7aSAdrian Chadd 49009352fb7aSAdrian Chadd /* If unicast frame, update general statistics */ 49019352fb7aSAdrian Chadd if (ni != NULL) { 4902eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 49039352fb7aSAdrian Chadd /* update statistics */ 49049352fb7aSAdrian Chadd ath_tx_update_stats(sc, ts, bf); 4905d7736e13SSam Leffler } 49069352fb7aSAdrian Chadd 49070a915fadSSam Leffler /* 49089352fb7aSAdrian Chadd * Call the completion handler. 49099352fb7aSAdrian Chadd * The completion handler is responsible for 49109352fb7aSAdrian Chadd * calling the rate control code. 49119352fb7aSAdrian Chadd * 49129352fb7aSAdrian Chadd * Frames with no completion handler get the 49139352fb7aSAdrian Chadd * rate control code called here. 491468e8e04eSSam Leffler */ 49159352fb7aSAdrian Chadd if (bf->bf_comp == NULL) { 49169352fb7aSAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 49179352fb7aSAdrian Chadd (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) { 49189352fb7aSAdrian Chadd /* 49199352fb7aSAdrian Chadd * XXX assume this isn't an aggregate 49209352fb7aSAdrian Chadd * frame. 
49219352fb7aSAdrian Chadd */ 4922eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(sc, ni, 4923eb6f0de0SAdrian Chadd bf->bf_state.bfs_rc, ts, 4924eb6f0de0SAdrian Chadd bf->bf_state.bfs_pktlen, 1, 4925eb6f0de0SAdrian Chadd (ts->ts_status == 0 ? 0 : 1)); 49265591b213SSam Leffler } 49279352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 0); 49289352fb7aSAdrian Chadd } else 49299352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 0); 49305591b213SSam Leffler } 4931339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 493268e8e04eSSam Leffler /* 493368e8e04eSSam Leffler * Flush fast-frame staging queue when traffic slows. 493468e8e04eSSam Leffler */ 493568e8e04eSSam Leffler if (txq->axq_depth <= 1) 493604f19fd6SSam Leffler ieee80211_ff_flush(ic, txq->axq_ac); 4937339ccfb3SSam Leffler #endif 4938eb6f0de0SAdrian Chadd 4939eb6f0de0SAdrian Chadd /* Kick the TXQ scheduler */ 4940eb6f0de0SAdrian Chadd if (dosched) { 4941eb6f0de0SAdrian Chadd ATH_TXQ_LOCK(txq); 4942eb6f0de0SAdrian Chadd ath_txq_sched(sc, txq); 4943eb6f0de0SAdrian Chadd ATH_TXQ_UNLOCK(txq); 4944eb6f0de0SAdrian Chadd } 4945eb6f0de0SAdrian Chadd 4946d7736e13SSam Leffler return nacked; 4947d7736e13SSam Leffler } 4948d7736e13SSam Leffler 49498f939e79SAdrian Chadd #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4950c42a7b7eSSam Leffler 4951c42a7b7eSSam Leffler /* 4952c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 4953c42a7b7eSSam Leffler * for a single hardware transmit queue (e.g. 5210 and 5211). 4954c42a7b7eSSam Leffler */ 4955c42a7b7eSSam Leffler static void 4956c42a7b7eSSam Leffler ath_tx_proc_q0(void *arg, int npending) 4957c42a7b7eSSam Leffler { 4958c42a7b7eSSam Leffler struct ath_softc *sc = arg; 4959fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 49608f939e79SAdrian Chadd uint32_t txqs; 4961c42a7b7eSSam Leffler 4962ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4963ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 49648f939e79SAdrian Chadd txqs = sc->sc_txq_active; 49658f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 4966ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 49678f939e79SAdrian Chadd 496896ff485dSAdrian Chadd if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 49698f939e79SAdrian Chadd /* XXX why is lastrx updated in tx code? */ 4970d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 49718f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 497296ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 4973ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 497413f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 49752e986da5SSam Leffler sc->sc_wd_timer = 0; 49765591b213SSam Leffler 49773e50ec2cSSam Leffler if (sc->sc_softled) 497846d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 49793e50ec2cSSam Leffler 4980ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4981ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 4982ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4983ef27340cSAdrian Chadd 49845591b213SSam Leffler ath_start(ifp); 49855591b213SSam Leffler } 49865591b213SSam Leffler 49875591b213SSam Leffler /* 4988c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 4989c42a7b7eSSam Leffler * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
49905591b213SSam Leffler */ 49915591b213SSam Leffler static void 4992c42a7b7eSSam Leffler ath_tx_proc_q0123(void *arg, int npending) 4993c42a7b7eSSam Leffler { 4994c42a7b7eSSam Leffler struct ath_softc *sc = arg; 4995fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 4996d7736e13SSam Leffler int nacked; 49978f939e79SAdrian Chadd uint32_t txqs; 49988f939e79SAdrian Chadd 4999ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5000ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50018f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50028f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5003ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5004c42a7b7eSSam Leffler 5005c42a7b7eSSam Leffler /* 5006c42a7b7eSSam Leffler * Process each active queue. 5007c42a7b7eSSam Leffler */ 5008d7736e13SSam Leffler nacked = 0; 50098f939e79SAdrian Chadd if (TXQACTIVE(txqs, 0)) 501096ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 50118f939e79SAdrian Chadd if (TXQACTIVE(txqs, 1)) 501296ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 50138f939e79SAdrian Chadd if (TXQACTIVE(txqs, 2)) 501496ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 50158f939e79SAdrian Chadd if (TXQACTIVE(txqs, 3)) 501696ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 50178f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 501896ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 5019d7736e13SSam Leffler if (nacked) 5020d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5021c42a7b7eSSam Leffler 5022ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 502313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 50242e986da5SSam Leffler sc->sc_wd_timer = 0; 5025c42a7b7eSSam Leffler 50263e50ec2cSSam Leffler if (sc->sc_softled) 502746d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 50283e50ec2cSSam Leffler 5029ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5030ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5031ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5032ef27340cSAdrian Chadd 5033c42a7b7eSSam Leffler ath_start(ifp); 5034c42a7b7eSSam Leffler } 5035c42a7b7eSSam Leffler 5036c42a7b7eSSam Leffler /* 5037c42a7b7eSSam Leffler * Deferred processing of transmit interrupt. 5038c42a7b7eSSam Leffler */ 5039c42a7b7eSSam Leffler static void 5040c42a7b7eSSam Leffler ath_tx_proc(void *arg, int npending) 5041c42a7b7eSSam Leffler { 5042c42a7b7eSSam Leffler struct ath_softc *sc = arg; 5043fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 5044d7736e13SSam Leffler int i, nacked; 50458f939e79SAdrian Chadd uint32_t txqs; 50468f939e79SAdrian Chadd 5047ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5048ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50498f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50508f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5051ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5052c42a7b7eSSam Leffler 5053c42a7b7eSSam Leffler /* 5054c42a7b7eSSam Leffler * Process each active queue. 5055c42a7b7eSSam Leffler */ 5056d7736e13SSam Leffler nacked = 0; 5057c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 50588f939e79SAdrian Chadd if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 505996ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 5060d7736e13SSam Leffler if (nacked) 5061d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5062c42a7b7eSSam Leffler 5063ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? 
*/ 506413f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 50652e986da5SSam Leffler sc->sc_wd_timer = 0; 5066c42a7b7eSSam Leffler 50673e50ec2cSSam Leffler if (sc->sc_softled) 506846d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 50693e50ec2cSSam Leffler 5070ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5071ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5072ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5073ef27340cSAdrian Chadd 5074c42a7b7eSSam Leffler ath_start(ifp); 5075c42a7b7eSSam Leffler } 507616d4de92SAdrian Chadd #undef TXQACTIVE 5077c42a7b7eSSam Leffler 50789352fb7aSAdrian Chadd /* 50799352fb7aSAdrian Chadd * Return a buffer to the pool and update the 'busy' flag on the 50809352fb7aSAdrian Chadd * previous 'tail' entry. 50819352fb7aSAdrian Chadd * 50829352fb7aSAdrian Chadd * This _must_ only be called when the buffer is involved in a completed 50839352fb7aSAdrian Chadd * TX. The logic is that if it was part of an active TX, the previous 50849352fb7aSAdrian Chadd * buffer on the list is now not involved in a halted TX DMA queue, waiting 50859352fb7aSAdrian Chadd * for restart (eg for TDMA.) 50869352fb7aSAdrian Chadd * 50879352fb7aSAdrian Chadd * The caller must free the mbuf and recycle the node reference. 50889352fb7aSAdrian Chadd */ 50899352fb7aSAdrian Chadd void 50909352fb7aSAdrian Chadd ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 50919352fb7aSAdrian Chadd { 50929352fb7aSAdrian Chadd bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 50939352fb7aSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 50949352fb7aSAdrian Chadd 50959352fb7aSAdrian Chadd KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 50969352fb7aSAdrian Chadd KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 50979352fb7aSAdrian Chadd 50989352fb7aSAdrian Chadd ATH_TXBUF_LOCK(sc); 50999352fb7aSAdrian Chadd ath_tx_update_busy(sc); 51009352fb7aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 51019352fb7aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 51029352fb7aSAdrian Chadd } 51039352fb7aSAdrian Chadd 51049352fb7aSAdrian Chadd /* 51059352fb7aSAdrian Chadd * This is currently used by ath_tx_draintxq() and 51069352fb7aSAdrian Chadd * ath_tx_tid_free_pkts(). 51079352fb7aSAdrian Chadd * 51089352fb7aSAdrian Chadd * It recycles a single ath_buf. 51099352fb7aSAdrian Chadd */ 51109352fb7aSAdrian Chadd void 51119352fb7aSAdrian Chadd ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 51129352fb7aSAdrian Chadd { 51139352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 51149352fb7aSAdrian Chadd struct mbuf *m0 = bf->bf_m; 51159352fb7aSAdrian Chadd 51169352fb7aSAdrian Chadd bf->bf_node = NULL; 51179352fb7aSAdrian Chadd bf->bf_m = NULL; 51189352fb7aSAdrian Chadd 51199352fb7aSAdrian Chadd /* Free the buffer, it's not needed any longer */ 51209352fb7aSAdrian Chadd ath_freebuf(sc, bf); 51219352fb7aSAdrian Chadd 51229352fb7aSAdrian Chadd if (ni != NULL) { 51239352fb7aSAdrian Chadd /* 51249352fb7aSAdrian Chadd * Do any callback and reclaim the node reference. 51259352fb7aSAdrian Chadd */ 51269352fb7aSAdrian Chadd if (m0->m_flags & M_TXCB) 51279352fb7aSAdrian Chadd ieee80211_process_callback(ni, m0, status); 51289352fb7aSAdrian Chadd ieee80211_free_node(ni); 51299352fb7aSAdrian Chadd } 51309352fb7aSAdrian Chadd m_freem(m0); 51319352fb7aSAdrian Chadd 51329352fb7aSAdrian Chadd /* 51339352fb7aSAdrian Chadd * XXX the buffer used to be freed -after-, but the DMA map was 51349352fb7aSAdrian Chadd * freed where ath_freebuf() now is. 
I've no idea what this 51359352fb7aSAdrian Chadd * will do. 51369352fb7aSAdrian Chadd */ 51379352fb7aSAdrian Chadd } 51389352fb7aSAdrian Chadd 51399352fb7aSAdrian Chadd void 5140c42a7b7eSSam Leffler ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 51415591b213SSam Leffler { 5142a585a9a1SSam Leffler #ifdef ATH_DEBUG 51435591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5144d2f6ed15SSam Leffler #endif 51455591b213SSam Leffler struct ath_buf *bf; 51467a4c5ed9SSam Leffler u_int ix; 51475591b213SSam Leffler 5148c42a7b7eSSam Leffler /* 5149c42a7b7eSSam Leffler * NB: this assumes output has been stopped and 51505d61b5e8SSam Leffler * we do not need to block ath_tx_proc 5151c42a7b7eSSam Leffler */ 515210ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 51536b349e5aSAdrian Chadd bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 515410ad9a77SSam Leffler if (bf != NULL) 515510ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 515610ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 51579352fb7aSAdrian Chadd 51587a4c5ed9SSam Leffler for (ix = 0;; ix++) { 5159c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 51606b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 51615591b213SSam Leffler if (bf == NULL) { 5162ebecf802SSam Leffler txq->axq_link = NULL; 5163c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 51645591b213SSam Leffler break; 51655591b213SSam Leffler } 51666b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 51676edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 51686edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 5169a585a9a1SSam Leffler #ifdef ATH_DEBUG 51704a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 5171b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5172b032f27cSSam Leffler 51736902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 51746edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 517565f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5176e40b6ab1SSam Leffler ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 51774a3ac3fcSSam Leffler bf->bf_m->m_len, 0, -1); 51784a3ac3fcSSam Leffler } 5179a585a9a1SSam Leffler #endif /* ATH_DEBUG */ 518023428eafSSam Leffler /* 51819352fb7aSAdrian Chadd * Since we're now doing magic in the completion 51829352fb7aSAdrian Chadd * functions, we -must- call it for aggregation 51839352fb7aSAdrian Chadd * destinations or BAW tracking will get upset. 518423428eafSSam Leffler */ 51859352fb7aSAdrian Chadd /* 51869352fb7aSAdrian Chadd * Clear ATH_BUF_BUSY; the completion handler 51879352fb7aSAdrian Chadd * will free the buffer. 51889352fb7aSAdrian Chadd */ 51899352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 519010ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 51919352fb7aSAdrian Chadd if (bf->bf_comp) 51929352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 1); 51939352fb7aSAdrian Chadd else 51949352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 1); 51955591b213SSam Leffler } 51969352fb7aSAdrian Chadd 5197eb6f0de0SAdrian Chadd /* 5198eb6f0de0SAdrian Chadd * Drain software queued frames which are on 5199eb6f0de0SAdrian Chadd * active TIDs. 
5200eb6f0de0SAdrian Chadd */ 5201eb6f0de0SAdrian Chadd ath_tx_txq_drain(sc, txq); 5202c42a7b7eSSam Leffler } 5203c42a7b7eSSam Leffler 5204c42a7b7eSSam Leffler static void 5205c42a7b7eSSam Leffler ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5206c42a7b7eSSam Leffler { 5207c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5208c42a7b7eSSam Leffler 5209c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5210c42a7b7eSSam Leffler __func__, txq->axq_qnum, 52116891c875SPeter Wemm (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 52126891c875SPeter Wemm txq->axq_link); 52134a3ac3fcSSam Leffler (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5214c42a7b7eSSam Leffler } 5215c42a7b7eSSam Leffler 52162d433424SAdrian Chadd static int 52172d433424SAdrian Chadd ath_stoptxdma(struct ath_softc *sc) 5218c42a7b7eSSam Leffler { 5219c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5220c42a7b7eSSam Leffler int i; 5221c42a7b7eSSam Leffler 5222c42a7b7eSSam Leffler /* XXX return value */ 52232d433424SAdrian Chadd if (sc->sc_invalid) 52242d433424SAdrian Chadd return 0; 52252d433424SAdrian Chadd 5226c42a7b7eSSam Leffler if (!sc->sc_invalid) { 5227c42a7b7eSSam Leffler /* don't touch the hardware if marked invalid */ 52284a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 52294a3ac3fcSSam Leffler __func__, sc->sc_bhalq, 52304a3ac3fcSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 52314a3ac3fcSSam Leffler NULL); 5232c42a7b7eSSam Leffler (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5233c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5234c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 5235c42a7b7eSSam Leffler ath_tx_stopdma(sc, &sc->sc_txq[i]); 5236c42a7b7eSSam Leffler } 52372d433424SAdrian Chadd 52382d433424SAdrian Chadd return 1; 52392d433424SAdrian Chadd } 52402d433424SAdrian Chadd 52412d433424SAdrian Chadd /* 52422d433424SAdrian Chadd * Drain the transmit queues and reclaim resources. 52432d433424SAdrian Chadd */ 52442d433424SAdrian Chadd static void 52452d433424SAdrian Chadd ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 52462d433424SAdrian Chadd { 52472d433424SAdrian Chadd #ifdef ATH_DEBUG 52482d433424SAdrian Chadd struct ath_hal *ah = sc->sc_ah; 52492d433424SAdrian Chadd #endif 52502d433424SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 52512d433424SAdrian Chadd int i; 52522d433424SAdrian Chadd 52532d433424SAdrian Chadd (void) ath_stoptxdma(sc); 52542d433424SAdrian Chadd 5255ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5256ef27340cSAdrian Chadd /* 5257ef27340cSAdrian Chadd * XXX TODO: should we just handle the completed TX frames 5258ef27340cSAdrian Chadd * here, whether or not the reset is a full one or not? 
5259ef27340cSAdrian Chadd */ 5260ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 5261ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) 5262ef27340cSAdrian Chadd ath_tx_processq(sc, &sc->sc_txq[i], 0); 5263ef27340cSAdrian Chadd else 5264c42a7b7eSSam Leffler ath_tx_draintxq(sc, &sc->sc_txq[i]); 5265ef27340cSAdrian Chadd } 5266ef27340cSAdrian Chadd } 52674a3ac3fcSSam Leffler #ifdef ATH_DEBUG 52684a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 52696b349e5aSAdrian Chadd struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 52704a3ac3fcSSam Leffler if (bf != NULL && bf->bf_m != NULL) { 52716902009eSSam Leffler ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 52726edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 527365f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5274e40b6ab1SSam Leffler ieee80211_dump_pkt(ifp->if_l2com, 5275e40b6ab1SSam Leffler mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5276e40b6ab1SSam Leffler 0, -1); 52774a3ac3fcSSam Leffler } 52784a3ac3fcSSam Leffler } 52794a3ac3fcSSam Leffler #endif /* ATH_DEBUG */ 5280ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 528113f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 52822e986da5SSam Leffler sc->sc_wd_timer = 0; 52835591b213SSam Leffler } 52845591b213SSam Leffler 52855591b213SSam Leffler /* 52865591b213SSam Leffler * Disable the receive h/w in preparation for a reset. 52875591b213SSam Leffler */ 52885591b213SSam Leffler static void 52899a842e8bSAdrian Chadd ath_stoprecv(struct ath_softc *sc, int dodelay) 52905591b213SSam Leffler { 52918cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 5292c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 5293c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 52945591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 52955591b213SSam Leffler 52965591b213SSam Leffler ath_hal_stoppcurecv(ah); /* disable PCU */ 52975591b213SSam Leffler ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 52985591b213SSam Leffler ath_hal_stopdmarecv(ah); /* disable DMA engine */ 52999a842e8bSAdrian Chadd if (dodelay) 5300c42a7b7eSSam Leffler DELAY(3000); /* 3ms is long enough for 1 frame */ 5301a585a9a1SSam Leffler #ifdef ATH_DEBUG 5302c42a7b7eSSam Leffler if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 53035591b213SSam Leffler struct ath_buf *bf; 53047a4c5ed9SSam Leffler u_int ix; 53055591b213SSam Leffler 5306e325e530SSam Leffler printf("%s: rx queue %p, link %p\n", __func__, 530730310634SPeter Wemm (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 53087a4c5ed9SSam Leffler ix = 0; 53096b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 53108cec0ab9SSam Leffler struct ath_desc *ds = bf->bf_desc; 531165f9edeeSSam Leffler struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5312c42a7b7eSSam Leffler HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 531365f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 5314c42a7b7eSSam Leffler if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 53156902009eSSam Leffler ath_printrxbuf(sc, bf, ix, status == HAL_OK); 53167a4c5ed9SSam Leffler ix++; 53175591b213SSam Leffler } 53185591b213SSam Leffler } 53195591b213SSam Leffler #endif 532068e8e04eSSam Leffler if (sc->sc_rxpending != NULL) { 532168e8e04eSSam Leffler m_freem(sc->sc_rxpending); 532268e8e04eSSam Leffler sc->sc_rxpending = NULL; 532368e8e04eSSam Leffler } 53245591b213SSam Leffler sc->sc_rxlink = NULL; /* just in case */ 53258cec0ab9SSam Leffler #undef PA2DESC 53265591b213SSam Leffler } 
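/*
 * Editor's note: an illustrative, non-compiled sketch (guarded by
 * "#if 0", not part of the driver) of the address translation the
 * PA2DESC() macro in ath_stoprecv() performs.  The RX descriptors
 * live in one contiguous DMA allocation described by sc_rxdma, so a
 * descriptor's bus (physical) address can be mapped back to its
 * kernel virtual address with plain offset arithmetic; this is how
 * the debug loop above hands ath_hal_rxprocdesc() the virtual
 * address of the next linked descriptor.  The helper name below is
 * hypothetical.
 */
#if 0
static struct ath_desc *
ath_rx_pa2desc_example(struct ath_softc *sc, bus_addr_t pa)
{
	/*
	 * Offset of the descriptor within the DMA block, added to
	 * the kernel virtual base of the same block; the same
	 * arithmetic as PA2DESC(sc, pa) above.
	 */
	return ((struct ath_desc *)((caddr_t)sc->sc_rxdma.dd_desc +
	    (pa - sc->sc_rxdma.dd_desc_paddr)));
}
#endif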
53275591b213SSam Leffler 53285591b213SSam Leffler /* 53295591b213SSam Leffler * Enable the receive h/w following a reset. 53305591b213SSam Leffler */ 53315591b213SSam Leffler static int 53325591b213SSam Leffler ath_startrecv(struct ath_softc *sc) 53335591b213SSam Leffler { 53345591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 53355591b213SSam Leffler struct ath_buf *bf; 53365591b213SSam Leffler 53375591b213SSam Leffler sc->sc_rxlink = NULL; 533868e8e04eSSam Leffler sc->sc_rxpending = NULL; 53396b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 53405591b213SSam Leffler int error = ath_rxbuf_init(sc, bf); 53415591b213SSam Leffler if (error != 0) { 5342c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 5343c42a7b7eSSam Leffler "%s: ath_rxbuf_init failed %d\n", 5344c42a7b7eSSam Leffler __func__, error); 53455591b213SSam Leffler return error; 53465591b213SSam Leffler } 53475591b213SSam Leffler } 53485591b213SSam Leffler 53496b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 53505591b213SSam Leffler ath_hal_putrxbuf(ah, bf->bf_daddr); 53515591b213SSam Leffler ath_hal_rxena(ah); /* enable recv descriptors */ 53525591b213SSam Leffler ath_mode_init(sc); /* set filters, etc. */ 53535591b213SSam Leffler ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 53545591b213SSam Leffler return 0; 53555591b213SSam Leffler } 53565591b213SSam Leffler 53575591b213SSam Leffler /* 5358c42a7b7eSSam Leffler * Update internal state after a channel change. 5359c42a7b7eSSam Leffler */ 5360c42a7b7eSSam Leffler static void 5361c42a7b7eSSam Leffler ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5362c42a7b7eSSam Leffler { 5363c42a7b7eSSam Leffler enum ieee80211_phymode mode; 5364c42a7b7eSSam Leffler 5365c42a7b7eSSam Leffler /* 5366c42a7b7eSSam Leffler * Change channels and update the h/w rate map 5367c42a7b7eSSam Leffler * if we're switching; e.g. 11a to 11b/g. 5368c42a7b7eSSam Leffler */ 536968e8e04eSSam Leffler mode = ieee80211_chan2mode(chan); 5370c42a7b7eSSam Leffler if (mode != sc->sc_curmode) 5371c42a7b7eSSam Leffler ath_setcurmode(sc, mode); 537259efa8b5SSam Leffler sc->sc_curchan = chan; 5373c42a7b7eSSam Leffler } 5374c42a7b7eSSam Leffler 5375c42a7b7eSSam Leffler /* 53765591b213SSam Leffler * Set/change channels. If the channel is really being changed, 53774fa8d4efSDaniel Eischen * it's done by resetting the chip. To accomplish this we must 53785591b213SSam Leffler * first cleanup any pending DMA, then restart stuff after a la 53795591b213SSam Leffler * ath_init. 53805591b213SSam Leffler */ 53815591b213SSam Leffler static int 53825591b213SSam Leffler ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 53835591b213SSam Leffler { 5384b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 5385b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 53865591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5387ef27340cSAdrian Chadd int ret = 0; 5388ef27340cSAdrian Chadd 5389ef27340cSAdrian Chadd /* Treat this as an interface reset */ 5390ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5391e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 5392e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 5393ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 5394ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! 
Danger!\n", 5395ef27340cSAdrian Chadd __func__); 5396ee321975SAdrian Chadd } 5397ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5398c42a7b7eSSam Leffler 539959efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 540059efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 540159efa8b5SSam Leffler chan->ic_freq, chan->ic_flags); 540259efa8b5SSam Leffler if (chan != sc->sc_curchan) { 5403c42a7b7eSSam Leffler HAL_STATUS status; 54045591b213SSam Leffler /* 54055591b213SSam Leffler * To switch channels clear any pending DMA operations; 54065591b213SSam Leffler * wait long enough for the RX fifo to drain, reset the 54075591b213SSam Leffler * hardware at the new frequency, and then re-enable 54085591b213SSam Leffler * the relevant bits of the h/w. 54095591b213SSam Leffler */ 5410ef27340cSAdrian Chadd #if 0 54115591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 5412ef27340cSAdrian Chadd #endif 54139a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* turn off frame recv */ 54149a842e8bSAdrian Chadd /* 54159a842e8bSAdrian Chadd * First, handle completed TX/RX frames. 54169a842e8bSAdrian Chadd */ 54179a842e8bSAdrian Chadd ath_rx_proc(sc, 0); 54189a842e8bSAdrian Chadd ath_draintxq(sc, ATH_RESET_NOLOSS); 54199a842e8bSAdrian Chadd /* 54209a842e8bSAdrian Chadd * Next, flush the non-scheduled frames. 54219a842e8bSAdrian Chadd */ 5422517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 54239a842e8bSAdrian Chadd 542459efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 5425b032f27cSSam Leffler if_printf(ifp, "%s: unable to reset " 542679649302SGavin Atkinson "channel %u (%u MHz, flags 0x%x), hal status %u\n", 542759efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 542859efa8b5SSam Leffler chan->ic_freq, chan->ic_flags, status); 5429ef27340cSAdrian Chadd ret = EIO; 5430ef27340cSAdrian Chadd goto finish; 54315591b213SSam Leffler } 5432c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 5433c42a7b7eSSam Leffler 543448237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 543548237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 543648237774SAdrian Chadd 54375591b213SSam Leffler /* 54385591b213SSam Leffler * Re-enable rx framework. 54395591b213SSam Leffler */ 54405591b213SSam Leffler if (ath_startrecv(sc) != 0) { 5441b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 5442b032f27cSSam Leffler __func__); 5443ef27340cSAdrian Chadd ret = EIO; 5444ef27340cSAdrian Chadd goto finish; 54455591b213SSam Leffler } 54465591b213SSam Leffler 54475591b213SSam Leffler /* 54485591b213SSam Leffler * Change channels and update the h/w rate map 54495591b213SSam Leffler * if we're switching; e.g. 11a to 11b/g. 54505591b213SSam Leffler */ 5451c42a7b7eSSam Leffler ath_chan_change(sc, chan); 54520a915fadSSam Leffler 54530a915fadSSam Leffler /* 54542fd9aabbSAdrian Chadd * Reset clears the beacon timers; reset them 54552fd9aabbSAdrian Chadd * here if needed. 54562fd9aabbSAdrian Chadd */ 54572fd9aabbSAdrian Chadd if (sc->sc_beacons) { /* restart beacons */ 54582fd9aabbSAdrian Chadd #ifdef IEEE80211_SUPPORT_TDMA 54592fd9aabbSAdrian Chadd if (sc->sc_tdma) 54602fd9aabbSAdrian Chadd ath_tdma_config(sc, NULL); 54612fd9aabbSAdrian Chadd else 54622fd9aabbSAdrian Chadd #endif 54632fd9aabbSAdrian Chadd ath_beacon_config(sc, NULL); 54642fd9aabbSAdrian Chadd } 54652fd9aabbSAdrian Chadd 54662fd9aabbSAdrian Chadd /* 54670a915fadSSam Leffler * Re-enable interrupts. 
54680a915fadSSam Leffler */ 5469e78719adSAdrian Chadd #if 0 54700a915fadSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5471ef27340cSAdrian Chadd #endif 54725591b213SSam Leffler } 5473ef27340cSAdrian Chadd 5474ef27340cSAdrian Chadd finish: 5475ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5476ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 5477ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 5478ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 5479ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5480ef27340cSAdrian Chadd 5481ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 5482ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5483ef27340cSAdrian Chadd ath_txrx_start(sc); 5484ef27340cSAdrian Chadd /* XXX ath_start? */ 5485ef27340cSAdrian Chadd 5486ef27340cSAdrian Chadd return ret; 54875591b213SSam Leffler } 54885591b213SSam Leffler 54895591b213SSam Leffler /* 54905591b213SSam Leffler * Periodically recalibrate the PHY to account 54915591b213SSam Leffler * for temperature/environment changes. 54925591b213SSam Leffler */ 54935591b213SSam Leffler static void 54945591b213SSam Leffler ath_calibrate(void *arg) 54955591b213SSam Leffler { 54965591b213SSam Leffler struct ath_softc *sc = arg; 54975591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 54982dc7fcc4SSam Leffler struct ifnet *ifp = sc->sc_ifp; 54998d91de92SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 55002dc7fcc4SSam Leffler HAL_BOOL longCal, isCalDone; 5501a108ab63SAdrian Chadd HAL_BOOL aniCal, shortCal = AH_FALSE; 55022dc7fcc4SSam Leffler int nextcal; 55035591b213SSam Leffler 55048d91de92SSam Leffler if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 55058d91de92SSam Leffler goto restart; 55062dc7fcc4SSam Leffler longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5507a108ab63SAdrian Chadd aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5508a108ab63SAdrian Chadd if (sc->sc_doresetcal) 5509a108ab63SAdrian Chadd shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5510a108ab63SAdrian Chadd 5511a108ab63SAdrian Chadd DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5512a108ab63SAdrian Chadd if (aniCal) { 5513a108ab63SAdrian Chadd sc->sc_stats.ast_ani_cal++; 5514a108ab63SAdrian Chadd sc->sc_lastani = ticks; 5515a108ab63SAdrian Chadd ath_hal_ani_poll(ah, sc->sc_curchan); 5516a108ab63SAdrian Chadd } 5517a108ab63SAdrian Chadd 55182dc7fcc4SSam Leffler if (longCal) { 55195591b213SSam Leffler sc->sc_stats.ast_per_cal++; 55208197f57eSAdrian Chadd sc->sc_lastlongcal = ticks; 55215591b213SSam Leffler if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 55225591b213SSam Leffler /* 55235591b213SSam Leffler * Rfgain is out of bounds, reset the chip 55245591b213SSam Leffler * to load new gain values. 55255591b213SSam Leffler */ 5526370572d9SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5527370572d9SSam Leffler "%s: rfgain change\n", __func__); 55285591b213SSam Leffler sc->sc_stats.ast_per_rfgain++; 5529ef27340cSAdrian Chadd /* 5530ef27340cSAdrian Chadd * Drop lock - we can't hold it across the 5531ef27340cSAdrian Chadd * ath_reset() call. Instead, we'll drop 5532ef27340cSAdrian Chadd * out here, do a reset, then reschedule 5533ef27340cSAdrian Chadd * the callout. 
5534ef27340cSAdrian Chadd */ 5535ef27340cSAdrian Chadd callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5536ef27340cSAdrian Chadd sc->sc_resetcal = 0; 5537ef27340cSAdrian Chadd sc->sc_doresetcal = AH_TRUE; 5538ef27340cSAdrian Chadd ATH_UNLOCK(sc); 5539517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 55400fbe75a1SAdrian Chadd ATH_LOCK(sc); 5541ef27340cSAdrian Chadd return; 55425591b213SSam Leffler } 55432dc7fcc4SSam Leffler /* 55442dc7fcc4SSam Leffler * If this long cal is after an idle period, then 55452dc7fcc4SSam Leffler * reset the data collection state so we start fresh. 55462dc7fcc4SSam Leffler */ 55472dc7fcc4SSam Leffler if (sc->sc_resetcal) { 554859efa8b5SSam Leffler (void) ath_hal_calreset(ah, sc->sc_curchan); 55492dc7fcc4SSam Leffler sc->sc_lastcalreset = ticks; 5550a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 55512dc7fcc4SSam Leffler sc->sc_resetcal = 0; 5552a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 55532dc7fcc4SSam Leffler } 55542dc7fcc4SSam Leffler } 5555a108ab63SAdrian Chadd 5556a108ab63SAdrian Chadd /* Only call if we're doing a short/long cal, not for ANI calibration */ 5557a108ab63SAdrian Chadd if (shortCal || longCal) { 555859efa8b5SSam Leffler if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 55592dc7fcc4SSam Leffler if (longCal) { 55602dc7fcc4SSam Leffler /* 55612dc7fcc4SSam Leffler * Calibrate noise floor data again in case of change. 55622dc7fcc4SSam Leffler */ 55632dc7fcc4SSam Leffler ath_hal_process_noisefloor(ah); 55642dc7fcc4SSam Leffler } 55652dc7fcc4SSam Leffler } else { 5566c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 5567c42a7b7eSSam Leffler "%s: calibration of channel %u failed\n", 556859efa8b5SSam Leffler __func__, sc->sc_curchan->ic_freq); 55695591b213SSam Leffler sc->sc_stats.ast_per_calfail++; 55705591b213SSam Leffler } 5571a108ab63SAdrian Chadd if (shortCal) 5572a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5573a108ab63SAdrian Chadd } 55742dc7fcc4SSam Leffler if (!isCalDone) { 55758d91de92SSam Leffler restart: 55767b0c77ecSSam Leffler /* 55772dc7fcc4SSam Leffler * Use a shorter interval to potentially collect multiple 55782dc7fcc4SSam Leffler * data samples required to complete calibration. Once 55792dc7fcc4SSam Leffler * we're told the work is done we drop back to a longer 55802dc7fcc4SSam Leffler * interval between requests. We're more aggressive doing 55812dc7fcc4SSam Leffler * work when operating as an AP to improve operation right 55822dc7fcc4SSam Leffler * after startup. 
55837b0c77ecSSam Leffler */ 5584a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5585a108ab63SAdrian Chadd nextcal = ath_shortcalinterval*hz/1000; 55862dc7fcc4SSam Leffler if (sc->sc_opmode != HAL_M_HOSTAP) 55872dc7fcc4SSam Leffler nextcal *= 10; 5588a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 55892dc7fcc4SSam Leffler } else { 5590a108ab63SAdrian Chadd /* nextcal should be the shortest time for next event */ 55912dc7fcc4SSam Leffler nextcal = ath_longcalinterval*hz; 55922dc7fcc4SSam Leffler if (sc->sc_lastcalreset == 0) 55932dc7fcc4SSam Leffler sc->sc_lastcalreset = sc->sc_lastlongcal; 55942dc7fcc4SSam Leffler else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 55952dc7fcc4SSam Leffler sc->sc_resetcal = 1; /* setup reset next trip */ 5596a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 5597bd5a9920SSam Leffler } 5598a108ab63SAdrian Chadd /* ANI calibration may occur more often than short/long/resetcal */ 5599a108ab63SAdrian Chadd if (ath_anicalinterval > 0) 5600a108ab63SAdrian Chadd nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5601bd5a9920SSam Leffler 56022dc7fcc4SSam Leffler if (nextcal != 0) { 56032dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 56042dc7fcc4SSam Leffler __func__, nextcal, isCalDone ? "" : "!"); 56052dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 56062dc7fcc4SSam Leffler } else { 56072dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 56082dc7fcc4SSam Leffler __func__); 56092dc7fcc4SSam Leffler /* NB: don't rearm timer */ 56102dc7fcc4SSam Leffler } 56115591b213SSam Leffler } 56125591b213SSam Leffler 561368e8e04eSSam Leffler static void 561468e8e04eSSam Leffler ath_scan_start(struct ieee80211com *ic) 561568e8e04eSSam Leffler { 561668e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 561768e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 561868e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 561968e8e04eSSam Leffler u_int32_t rfilt; 562068e8e04eSSam Leffler 562168e8e04eSSam Leffler /* XXX calibration timer? 
*/
562268e8e04eSSam Leffler 
562368e8e04eSSam Leffler sc->sc_scanning = 1;
562468e8e04eSSam Leffler sc->sc_syncbeacon = 0;
562568e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc);
562668e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt);
562768e8e04eSSam Leffler ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
562868e8e04eSSam Leffler 
562968e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
563068e8e04eSSam Leffler __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
563168e8e04eSSam Leffler }
563268e8e04eSSam Leffler 
563368e8e04eSSam Leffler static void
563468e8e04eSSam Leffler ath_scan_end(struct ieee80211com *ic)
563568e8e04eSSam Leffler {
563668e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp;
563768e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc;
563868e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah;
563968e8e04eSSam Leffler u_int32_t rfilt;
564068e8e04eSSam Leffler 
564168e8e04eSSam Leffler sc->sc_scanning = 0;
564268e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc);
564368e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt);
564468e8e04eSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
564568e8e04eSSam Leffler 
564668e8e04eSSam Leffler ath_hal_process_noisefloor(ah);
564768e8e04eSSam Leffler 
564868e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
564968e8e04eSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid),
565068e8e04eSSam Leffler sc->sc_curaid);
565168e8e04eSSam Leffler }
565268e8e04eSSam Leffler 
565368e8e04eSSam Leffler static void
565468e8e04eSSam Leffler ath_set_channel(struct ieee80211com *ic)
565568e8e04eSSam Leffler {
565668e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp;
565768e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc;
565868e8e04eSSam Leffler 
565968e8e04eSSam Leffler (void) ath_chan_set(sc, ic->ic_curchan);
566068e8e04eSSam Leffler /*
566168e8e04eSSam Leffler * If we are returning to our bss channel then mark state
566268e8e04eSSam Leffler * so the next recv'd beacon's tsf will be used to sync the
566368e8e04eSSam Leffler * beacon timers. Note that since we only hear beacons in
566468e8e04eSSam Leffler * sta/ibss mode this has no effect in other operating modes.
566568e8e04eSSam Leffler */
566668e8e04eSSam Leffler if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
566768e8e04eSSam Leffler sc->sc_syncbeacon = 1;
566868e8e04eSSam Leffler }
566968e8e04eSSam Leffler 
5670b032f27cSSam Leffler /*
5671b032f27cSSam Leffler * Walk the vap list and check if there are any vaps in RUN state.
5672b032f27cSSam Leffler */ 56735591b213SSam Leffler static int 5674b032f27cSSam Leffler ath_isanyrunningvaps(struct ieee80211vap *this) 56755591b213SSam Leffler { 5676b032f27cSSam Leffler struct ieee80211com *ic = this->iv_ic; 5677b032f27cSSam Leffler struct ieee80211vap *vap; 5678b032f27cSSam Leffler 5679b032f27cSSam Leffler IEEE80211_LOCK_ASSERT(ic); 5680b032f27cSSam Leffler 5681b032f27cSSam Leffler TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5682309a3e45SSam Leffler if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5683b032f27cSSam Leffler return 1; 5684b032f27cSSam Leffler } 5685b032f27cSSam Leffler return 0; 5686b032f27cSSam Leffler } 5687b032f27cSSam Leffler 5688b032f27cSSam Leffler static int 5689b032f27cSSam Leffler ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5690b032f27cSSam Leffler { 5691b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 5692b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5693b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 569445bbf62fSSam Leffler struct ath_hal *ah = sc->sc_ah; 5695b032f27cSSam Leffler struct ieee80211_node *ni = NULL; 569668e8e04eSSam Leffler int i, error, stamode; 56975591b213SSam Leffler u_int32_t rfilt; 5698f52efb6dSAdrian Chadd int csa_run_transition = 0; 56995591b213SSam Leffler static const HAL_LED_STATE leds[] = { 57005591b213SSam Leffler HAL_LED_INIT, /* IEEE80211_S_INIT */ 57015591b213SSam Leffler HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 57025591b213SSam Leffler HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 57035591b213SSam Leffler HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 570477d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CAC */ 57055591b213SSam Leffler HAL_LED_RUN, /* IEEE80211_S_RUN */ 570677d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CSA */ 570777d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 57085591b213SSam Leffler }; 57095591b213SSam Leffler 5710c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5711b032f27cSSam Leffler ieee80211_state_name[vap->iv_state], 5712c42a7b7eSSam Leffler ieee80211_state_name[nstate]); 57135591b213SSam Leffler 5714*107fdf96SAdrian Chadd /* 5715*107fdf96SAdrian Chadd * net80211 _should_ have the comlock asserted at this point. 5716*107fdf96SAdrian Chadd * There are some comments around the calls to vap->iv_newstate 5717*107fdf96SAdrian Chadd * which indicate that it (newstate) may end up dropping the 5718*107fdf96SAdrian Chadd * lock. This and the subsequent lock assert check after newstate 5719*107fdf96SAdrian Chadd * are an attempt to catch these and figure out how/why. 5720*107fdf96SAdrian Chadd */ 5721*107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 5722*107fdf96SAdrian Chadd 5723f52efb6dSAdrian Chadd if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5724f52efb6dSAdrian Chadd csa_run_transition = 1; 5725f52efb6dSAdrian Chadd 57262e986da5SSam Leffler callout_drain(&sc->sc_cal_ch); 57275591b213SSam Leffler ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 57285591b213SSam Leffler 5729b032f27cSSam Leffler if (nstate == IEEE80211_S_SCAN) { 573058769f58SSam Leffler /* 5731b032f27cSSam Leffler * Scanning: turn off beacon miss and don't beacon. 5732b032f27cSSam Leffler * Mark beacon state so when we reach RUN state we'll 5733b032f27cSSam Leffler * [re]setup beacons. Unblock the task q thread so 5734b032f27cSSam Leffler * deferred interrupt processing is done. 
573558769f58SSam Leffler */ 5736b032f27cSSam Leffler ath_hal_intrset(ah, 5737b032f27cSSam Leffler sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 57385591b213SSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5739b032f27cSSam Leffler sc->sc_beacons = 0; 5740b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 57415591b213SSam Leffler } 57425591b213SSam Leffler 574380767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 574468e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 5745b032f27cSSam Leffler stamode = (vap->iv_opmode == IEEE80211_M_STA || 57467b916f89SSam Leffler vap->iv_opmode == IEEE80211_M_AHDEMO || 5747b032f27cSSam Leffler vap->iv_opmode == IEEE80211_M_IBSS); 574868e8e04eSSam Leffler if (stamode && nstate == IEEE80211_S_RUN) { 574968e8e04eSSam Leffler sc->sc_curaid = ni->ni_associd; 575068e8e04eSSam Leffler IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5751b032f27cSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5752b032f27cSSam Leffler } 575368e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5754b032f27cSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 575568e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 575668e8e04eSSam Leffler 5757b032f27cSSam Leffler /* XXX is this to restore keycache on resume? */ 5758b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_STA && 5759b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY)) { 57605591b213SSam Leffler for (i = 0; i < IEEE80211_WEP_NKID; i++) 57615591b213SSam Leffler if (ath_hal_keyisvalid(ah, i)) 576268e8e04eSSam Leffler ath_hal_keysetmac(ah, i, ni->ni_bssid); 57635591b213SSam Leffler } 5764b032f27cSSam Leffler 5765b032f27cSSam Leffler /* 5766b032f27cSSam Leffler * Invoke the parent method to do net80211 work. 5767b032f27cSSam Leffler */ 5768b032f27cSSam Leffler error = avp->av_newstate(vap, nstate, arg); 5769b032f27cSSam Leffler if (error != 0) 5770b032f27cSSam Leffler goto bad; 5771c42a7b7eSSam Leffler 5772*107fdf96SAdrian Chadd /* 5773*107fdf96SAdrian Chadd * See above: ensure av_newstate() doesn't drop the lock 5774*107fdf96SAdrian Chadd * on us. 5775*107fdf96SAdrian Chadd */ 5776*107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 5777*107fdf96SAdrian Chadd 577868e8e04eSSam Leffler if (nstate == IEEE80211_S_RUN) { 5779b032f27cSSam Leffler /* NB: collect bss node again, it may have changed */ 578080767531SAdrian Chadd ieee80211_free_node(ni); 578180767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 57825591b213SSam Leffler 5783b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 5784b032f27cSSam Leffler "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5785b032f27cSSam Leffler "capinfo 0x%04x chan %d\n", __func__, 5786b032f27cSSam Leffler vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5787b032f27cSSam Leffler ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5788b032f27cSSam Leffler 5789b032f27cSSam Leffler switch (vap->iv_opmode) { 5790584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 579110ad9a77SSam Leffler case IEEE80211_M_AHDEMO: 579210ad9a77SSam Leffler if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 579310ad9a77SSam Leffler break; 579410ad9a77SSam Leffler /* fall thru... */ 579510ad9a77SSam Leffler #endif 5796e8fd88a3SSam Leffler case IEEE80211_M_HOSTAP: 5797e8fd88a3SSam Leffler case IEEE80211_M_IBSS: 579859aa14a9SRui Paulo case IEEE80211_M_MBSS: 57995591b213SSam Leffler /* 5800e8fd88a3SSam Leffler * Allocate and setup the beacon frame. 
5801e8fd88a3SSam Leffler * 5802f818612bSSam Leffler * Stop any previous beacon DMA. This may be 5803f818612bSSam Leffler * necessary, for example, when an ibss merge 5804f818612bSSam Leffler * causes reconfiguration; there will be a state 5805f818612bSSam Leffler * transition from RUN->RUN that means we may 5806f818612bSSam Leffler * be called with beacon transmission active. 5807f818612bSSam Leffler */ 5808f818612bSSam Leffler ath_hal_stoptxdma(ah, sc->sc_bhalq); 5809b032f27cSSam Leffler 58105591b213SSam Leffler error = ath_beacon_alloc(sc, ni); 58115591b213SSam Leffler if (error != 0) 58125591b213SSam Leffler goto bad; 58137a04dc27SSam Leffler /* 581480d939bfSSam Leffler * If joining an adhoc network defer beacon timer 581580d939bfSSam Leffler * configuration to the next beacon frame so we 581680d939bfSSam Leffler * have a current TSF to use. Otherwise we're 5817b032f27cSSam Leffler * starting an ibss/bss so there's no need to delay; 5818b032f27cSSam Leffler * if this is the first vap moving to RUN state, then 5819b032f27cSSam Leffler * beacon state needs to be [re]configured. 58207a04dc27SSam Leffler */ 5821b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS && 5822b032f27cSSam Leffler ni->ni_tstamp.tsf != 0) { 582380d939bfSSam Leffler sc->sc_syncbeacon = 1; 5824b032f27cSSam Leffler } else if (!sc->sc_beacons) { 5825584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 582610ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) 582710ad9a77SSam Leffler ath_tdma_config(sc, vap); 582810ad9a77SSam Leffler else 582910ad9a77SSam Leffler #endif 5830b032f27cSSam Leffler ath_beacon_config(sc, vap); 5831b032f27cSSam Leffler sc->sc_beacons = 1; 5832b032f27cSSam Leffler } 5833e8fd88a3SSam Leffler break; 5834e8fd88a3SSam Leffler case IEEE80211_M_STA: 5835e8fd88a3SSam Leffler /* 583680d939bfSSam Leffler * Defer beacon timer configuration to the next 583780d939bfSSam Leffler * beacon frame so we have a current TSF to use 583880d939bfSSam Leffler * (any TSF collected when scanning is likely old). 5839f52efb6dSAdrian Chadd * However if it's due to a CSA -> RUN transition, 5840f52efb6dSAdrian Chadd * force a beacon update so we pick up a lack of 5841f52efb6dSAdrian Chadd * beacons from an AP in CAC and thus force a 5842f52efb6dSAdrian Chadd * scan. 58437a04dc27SSam Leffler */ 584480d939bfSSam Leffler sc->sc_syncbeacon = 1; 5845f52efb6dSAdrian Chadd if (csa_run_transition) 5846f52efb6dSAdrian Chadd ath_beacon_config(sc, vap); 5847e8fd88a3SSam Leffler break; 5848b032f27cSSam Leffler case IEEE80211_M_MONITOR: 5849b032f27cSSam Leffler /* 5850b032f27cSSam Leffler * Monitor mode vaps have only INIT->RUN and RUN->RUN 5851b032f27cSSam Leffler * transitions so we must re-enable interrupts here to 5852b032f27cSSam Leffler * handle the case of a single monitor mode vap. 5853b032f27cSSam Leffler */ 5854b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5855b032f27cSSam Leffler break; 5856b032f27cSSam Leffler case IEEE80211_M_WDS: 5857b032f27cSSam Leffler break; 5858e8fd88a3SSam Leffler default: 5859e8fd88a3SSam Leffler break; 58605591b213SSam Leffler } 58615591b213SSam Leffler /* 58627b0c77ecSSam Leffler * Let the hal process statistics collected during a 58637b0c77ecSSam Leffler * scan so it can provide calibrated noise floor data. 58647b0c77ecSSam Leffler */ 58657b0c77ecSSam Leffler ath_hal_process_noisefloor(ah); 58667b0c77ecSSam Leffler /* 5867ffa2cab6SSam Leffler * Reset rssi stats; maybe not the best place... 
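 * Writing ATH_RSSI_DUMMY_MARKER simply re-seeds the running RSSI
 * averages so samples gathered before or during the scan don't bleed
 * into the new association.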
5868ffa2cab6SSam Leffler */ 5869ffa2cab6SSam Leffler sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5870ffa2cab6SSam Leffler sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5871ffa2cab6SSam Leffler sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 587245bbf62fSSam Leffler /* 5873b032f27cSSam Leffler * Finally, start any timers and the task q thread 5874b032f27cSSam Leffler * (in case we didn't go through SCAN state). 587545bbf62fSSam Leffler */ 58762dc7fcc4SSam Leffler if (ath_longcalinterval != 0) { 5877c42a7b7eSSam Leffler /* start periodic recalibration timer */ 58782dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 58792dc7fcc4SSam Leffler } else { 58802dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, 58812dc7fcc4SSam Leffler "%s: calibration disabled\n", __func__); 5882c42a7b7eSSam Leffler } 5883b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 5884b032f27cSSam Leffler } else if (nstate == IEEE80211_S_INIT) { 5885b032f27cSSam Leffler /* 5886b032f27cSSam Leffler * If there are no vaps left in RUN state then 5887b032f27cSSam Leffler * shutdown host/driver operation: 5888b032f27cSSam Leffler * o disable interrupts 5889b032f27cSSam Leffler * o disable the task queue thread 5890b032f27cSSam Leffler * o mark beacon processing as stopped 5891b032f27cSSam Leffler */ 5892b032f27cSSam Leffler if (!ath_isanyrunningvaps(vap)) { 5893b032f27cSSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5894b032f27cSSam Leffler /* disable interrupts */ 5895b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5896b032f27cSSam Leffler taskqueue_block(sc->sc_tq); 5897b032f27cSSam Leffler sc->sc_beacons = 0; 5898b032f27cSSam Leffler } 5899584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 590010ad9a77SSam Leffler ath_hal_setcca(ah, AH_TRUE); 590110ad9a77SSam Leffler #endif 5902b032f27cSSam Leffler } 59035591b213SSam Leffler bad: 590480767531SAdrian Chadd ieee80211_free_node(ni); 59055591b213SSam Leffler return error; 59065591b213SSam Leffler } 59075591b213SSam Leffler 59085591b213SSam Leffler /* 5909e8fd88a3SSam Leffler * Allocate a key cache slot to the station so we can 5910e8fd88a3SSam Leffler * setup a mapping from key index to node. The key cache 5911e8fd88a3SSam Leffler * slot is needed for managing antenna state and for 5912e8fd88a3SSam Leffler * compression when stations do not use crypto. We do 5913e8fd88a3SSam Leffler * it uniliaterally here; if crypto is employed this slot 5914e8fd88a3SSam Leffler * will be reassigned. 5915e8fd88a3SSam Leffler */ 5916e8fd88a3SSam Leffler static void 5917e8fd88a3SSam Leffler ath_setup_stationkey(struct ieee80211_node *ni) 5918e8fd88a3SSam Leffler { 5919b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 5920b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5921c1225b52SSam Leffler ieee80211_keyix keyix, rxkeyix; 5922e8fd88a3SSam Leffler 592380767531SAdrian Chadd /* XXX should take a locked ref to vap->iv_bss */ 5924b032f27cSSam Leffler if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 5925e8fd88a3SSam Leffler /* 5926e8fd88a3SSam Leffler * Key cache is full; we'll fall back to doing 5927e8fd88a3SSam Leffler * the more expensive lookup in software. Note 5928e8fd88a3SSam Leffler * this also means no h/w compression. 5929e8fd88a3SSam Leffler */ 5930e8fd88a3SSam Leffler /* XXX msg+statistic */ 5931e8fd88a3SSam Leffler } else { 5932c1225b52SSam Leffler /* XXX locking? 
*/ 5933e8fd88a3SSam Leffler ni->ni_ucastkey.wk_keyix = keyix; 5934c1225b52SSam Leffler ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 593533052833SSam Leffler /* NB: must mark device key to get called back on delete */ 593633052833SSam Leffler ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 5937d3ac945bSSam Leffler IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5938e8fd88a3SSam Leffler /* NB: this will create a pass-thru key entry */ 593955c7b877SAdrian Chadd ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 5940e8fd88a3SSam Leffler } 5941e8fd88a3SSam Leffler } 5942e8fd88a3SSam Leffler 5943e8fd88a3SSam Leffler /* 59445591b213SSam Leffler * Setup driver-specific state for a newly associated node. 59455591b213SSam Leffler * Note that we're also called on a re-associate; the isnew 59465591b213SSam Leffler * param tells us if this is the first time or not. 59475591b213SSam Leffler */ 59485591b213SSam Leffler static void 5949e9962332SSam Leffler ath_newassoc(struct ieee80211_node *ni, int isnew) 59505591b213SSam Leffler { 5951b032f27cSSam Leffler struct ath_node *an = ATH_NODE(ni); 5952b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 5953b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5954c62362cbSSam Leffler const struct ieee80211_txparam *tp = ni->ni_txparms; 59555591b213SSam Leffler 5956ab06fdf2SSam Leffler an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 5957ab06fdf2SSam Leffler an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 5958b032f27cSSam Leffler 5959b032f27cSSam Leffler ath_rate_newassoc(sc, an, isnew); 5960e8fd88a3SSam Leffler if (isnew && 5961b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 5962b032f27cSSam Leffler ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 5963e8fd88a3SSam Leffler ath_setup_stationkey(ni); 5964e8fd88a3SSam Leffler } 59655591b213SSam Leffler 59665591b213SSam Leffler static int 596759efa8b5SSam Leffler ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 5968b032f27cSSam Leffler int nchans, struct ieee80211_channel chans[]) 5969b032f27cSSam Leffler { 5970b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5971b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 597259efa8b5SSam Leffler HAL_STATUS status; 5973b032f27cSSam Leffler 5974033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 597559efa8b5SSam Leffler "%s: rd %u cc %u location %c%s\n", 597659efa8b5SSam Leffler __func__, reg->regdomain, reg->country, reg->location, 597759efa8b5SSam Leffler reg->ecm ?
" ecm" : ""); 5978033022a9SSam Leffler 597959efa8b5SSam Leffler status = ath_hal_set_channels(ah, chans, nchans, 598059efa8b5SSam Leffler reg->country, reg->regdomain); 598159efa8b5SSam Leffler if (status != HAL_OK) { 598259efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 598359efa8b5SSam Leffler __func__, status); 598459efa8b5SSam Leffler return EINVAL; /* XXX */ 5985b032f27cSSam Leffler } 59868db87e40SAdrian Chadd 5987b032f27cSSam Leffler return 0; 5988b032f27cSSam Leffler } 5989b032f27cSSam Leffler 5990b032f27cSSam Leffler static void 5991b032f27cSSam Leffler ath_getradiocaps(struct ieee80211com *ic, 59925fe9f044SSam Leffler int maxchans, int *nchans, struct ieee80211_channel chans[]) 5993b032f27cSSam Leffler { 5994b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5995b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 5996b032f27cSSam Leffler 599759efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 599859efa8b5SSam Leffler __func__, SKU_DEBUG, CTRY_DEFAULT); 5999033022a9SSam Leffler 600059efa8b5SSam Leffler /* XXX check return */ 600159efa8b5SSam Leffler (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 600259efa8b5SSam Leffler HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 6003033022a9SSam Leffler 6004b032f27cSSam Leffler } 6005b032f27cSSam Leffler 6006b032f27cSSam Leffler static int 6007b032f27cSSam Leffler ath_getchannels(struct ath_softc *sc) 6008b032f27cSSam Leffler { 6009b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 6010b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 6011b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 601259efa8b5SSam Leffler HAL_STATUS status; 6013b032f27cSSam Leffler 6014b032f27cSSam Leffler /* 601559efa8b5SSam Leffler * Collect channel set based on EEPROM contents. 6016b032f27cSSam Leffler */ 601759efa8b5SSam Leffler status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 601859efa8b5SSam Leffler &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 601959efa8b5SSam Leffler if (status != HAL_OK) { 602059efa8b5SSam Leffler if_printf(ifp, "%s: unable to collect channel list from hal, " 602159efa8b5SSam Leffler "status %d\n", __func__, status); 602259efa8b5SSam Leffler return EINVAL; 602359efa8b5SSam Leffler } 6024ca876918SSam Leffler (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 6025ca876918SSam Leffler ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 602659efa8b5SSam Leffler /* XXX map Atheros sku's to net80211 SKU's */ 602759efa8b5SSam Leffler /* XXX net80211 types too small */ 602859efa8b5SSam Leffler ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 602959efa8b5SSam Leffler ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 603059efa8b5SSam Leffler ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 603159efa8b5SSam Leffler ic->ic_regdomain.isocc[1] = ' '; 603259efa8b5SSam Leffler 6033b032f27cSSam Leffler ic->ic_regdomain.ecm = 1; 6034b032f27cSSam Leffler ic->ic_regdomain.location = 'I'; 6035033022a9SSam Leffler 6036033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 603759efa8b5SSam Leffler "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 6038033022a9SSam Leffler __func__, sc->sc_eerd, sc->sc_eecc, 6039033022a9SSam Leffler ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 604059efa8b5SSam Leffler ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 60415591b213SSam Leffler return 0; 60425591b213SSam Leffler } 60435591b213SSam Leffler 60446c4612b9SSam Leffler static int 60456c4612b9SSam Leffler ath_rate_setup(struct ath_softc *sc, u_int mode) 60466c4612b9SSam Leffler { 60476c4612b9SSam Leffler struct ath_hal *ah = sc->sc_ah; 60486c4612b9SSam Leffler const HAL_RATE_TABLE *rt; 60496c4612b9SSam Leffler 60506c4612b9SSam Leffler switch (mode) { 60516c4612b9SSam Leffler case IEEE80211_MODE_11A: 60526c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A); 60536c4612b9SSam Leffler break; 6054724c193aSSam Leffler case IEEE80211_MODE_HALF: 6055aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6056aaa70f2fSSam Leffler break; 6057724c193aSSam Leffler case IEEE80211_MODE_QUARTER: 6058aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6059aaa70f2fSSam Leffler break; 60606c4612b9SSam Leffler case IEEE80211_MODE_11B: 60616c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11B); 60626c4612b9SSam Leffler break; 60636c4612b9SSam Leffler case IEEE80211_MODE_11G: 60646c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11G); 60656c4612b9SSam Leffler break; 60666c4612b9SSam Leffler case IEEE80211_MODE_TURBO_A: 606768e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108A); 60686c4612b9SSam Leffler break; 60696c4612b9SSam Leffler case IEEE80211_MODE_TURBO_G: 60706c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108G); 60716c4612b9SSam Leffler break; 607268e8e04eSSam Leffler case IEEE80211_MODE_STURBO_A: 607368e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 607468e8e04eSSam Leffler break; 607568e8e04eSSam Leffler case IEEE80211_MODE_11NA: 607668e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 607768e8e04eSSam Leffler break; 607868e8e04eSSam Leffler case IEEE80211_MODE_11NG: 607968e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 608068e8e04eSSam Leffler break; 60816c4612b9SSam Leffler default: 60826c4612b9SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 60836c4612b9SSam Leffler __func__, mode); 60846c4612b9SSam Leffler return 0; 60856c4612b9SSam Leffler } 60866c4612b9SSam Leffler sc->sc_rates[mode] = rt; 6087aaa70f2fSSam Leffler return (rt != NULL); 60885591b213SSam Leffler } 60895591b213SSam Leffler 60905591b213SSam Leffler static void 60915591b213SSam Leffler ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 60925591b213SSam Leffler { 60933e50ec2cSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 60943e50ec2cSSam Leffler /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 60953e50ec2cSSam Leffler static const struct { 60963e50ec2cSSam Leffler u_int rate; /* tx/rx 802.11 rate */ 60973e50ec2cSSam Leffler u_int16_t timeOn; /* LED on time (ms) */ 60983e50ec2cSSam Leffler u_int16_t timeOff; /* LED off time (ms) */ 60993e50ec2cSSam Leffler } blinkrates[] = { 61003e50ec2cSSam Leffler { 108, 40, 10 }, 61013e50ec2cSSam Leffler { 96, 44, 11 }, 61023e50ec2cSSam Leffler { 72, 50, 13 }, 61033e50ec2cSSam Leffler { 48, 57, 14 }, 61043e50ec2cSSam Leffler { 36, 67, 16 }, 61053e50ec2cSSam Leffler { 24, 80, 20 }, 61063e50ec2cSSam Leffler { 22, 100, 25 }, 61073e50ec2cSSam Leffler { 18, 133, 34 }, 61083e50ec2cSSam Leffler { 12, 160, 40 }, 61093e50ec2cSSam Leffler { 10, 200, 50 }, 61103e50ec2cSSam Leffler { 6, 240, 58 }, 61113e50ec2cSSam Leffler { 4, 267, 66 }, 61123e50ec2cSSam Leffler { 2, 400, 100 }, 61133e50ec2cSSam Leffler { 0, 500, 130 }, 6114724c193aSSam 
Leffler /* XXX half/quarter rates */ 61153e50ec2cSSam Leffler }; 61165591b213SSam Leffler const HAL_RATE_TABLE *rt; 61173e50ec2cSSam Leffler int i, j; 61185591b213SSam Leffler 61195591b213SSam Leffler memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 61205591b213SSam Leffler rt = sc->sc_rates[mode]; 61215591b213SSam Leffler KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6122180f268dSSam Leffler for (i = 0; i < rt->rateCount; i++) { 6123180f268dSSam Leffler uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6124180f268dSSam Leffler if (rt->info[i].phy != IEEE80211_T_HT) 6125180f268dSSam Leffler sc->sc_rixmap[ieeerate] = i; 6126180f268dSSam Leffler else 6127180f268dSSam Leffler sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6128180f268dSSam Leffler } 61291b1a8e41SSam Leffler memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 613046d4d74cSSam Leffler for (i = 0; i < N(sc->sc_hwmap); i++) { 613146d4d74cSSam Leffler if (i >= rt->rateCount) { 61323e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 61333e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 613416b4851aSSam Leffler continue; 61353e50ec2cSSam Leffler } 61363e50ec2cSSam Leffler sc->sc_hwmap[i].ieeerate = 613746d4d74cSSam Leffler rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 613846d4d74cSSam Leffler if (rt->info[i].phy == IEEE80211_T_HT) 613926041a14SSam Leffler sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6140d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 614146d4d74cSSam Leffler if (rt->info[i].shortPreamble || 614246d4d74cSSam Leffler rt->info[i].phy == IEEE80211_T_OFDM) 6143d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 61445463c4a4SSam Leffler sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 61453e50ec2cSSam Leffler for (j = 0; j < N(blinkrates)-1; j++) 61463e50ec2cSSam Leffler if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 61473e50ec2cSSam Leffler break; 61483e50ec2cSSam Leffler /* NB: this uses the last entry if the rate isn't found */ 61493e50ec2cSSam Leffler /* XXX beware of overflow */ 61503e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 61513e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 6152c42a7b7eSSam Leffler } 61535591b213SSam Leffler sc->sc_currates = rt; 61545591b213SSam Leffler sc->sc_curmode = mode; 61555591b213SSam Leffler /* 6156c42a7b7eSSam Leffler * All protection frames are transmitted at 2Mb/s for 6157c42a7b7eSSam Leffler * 11g, otherwise at 1Mb/s.
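 * The ath_tx_findrix() argument below is an 802.11 rate in half-Mb/s
 * units, so 2*2 selects the 2Mb/s entry and 2*1 the 1Mb/s entry.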
61585591b213SSam Leffler */ 6159913a1ba1SSam Leffler if (mode == IEEE80211_MODE_11G) 6160ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*2); 6161913a1ba1SSam Leffler else 6162ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*1); 61634fa8d4efSDaniel Eischen /* NB: caller is responsible for resetting rate control state */ 61643e50ec2cSSam Leffler #undef N 61655591b213SSam Leffler } 61665591b213SSam Leffler 6167c42a7b7eSSam Leffler static void 61682e986da5SSam Leffler ath_watchdog(void *arg) 6169c42a7b7eSSam Leffler { 61702e986da5SSam Leffler struct ath_softc *sc = arg; 6171ef27340cSAdrian Chadd int do_reset = 0; 6172c42a7b7eSSam Leffler 61732e986da5SSam Leffler if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 61742e986da5SSam Leffler struct ifnet *ifp = sc->sc_ifp; 6175459bc4f0SSam Leffler uint32_t hangs; 6176459bc4f0SSam Leffler 6177459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 6178459bc4f0SSam Leffler hangs != 0) { 6179459bc4f0SSam Leffler if_printf(ifp, "%s hang detected (0x%x)\n", 6180459bc4f0SSam Leffler hangs & 0xff ? "bb" : "mac", hangs); 6181459bc4f0SSam Leffler } else 6182c42a7b7eSSam Leffler if_printf(ifp, "device timeout\n"); 6183ef27340cSAdrian Chadd do_reset = 1; 6184c42a7b7eSSam Leffler ifp->if_oerrors++; 6185c42a7b7eSSam Leffler sc->sc_stats.ast_watchdog++; 6186c42a7b7eSSam Leffler } 6187ef27340cSAdrian Chadd 6188ef27340cSAdrian Chadd /* 6189ef27340cSAdrian Chadd * We can't hold the lock across the ath_reset() call. 6190ef27340cSAdrian Chadd */ 6191ef27340cSAdrian Chadd if (do_reset) { 6192ef27340cSAdrian Chadd ATH_UNLOCK(sc); 6193ef27340cSAdrian Chadd ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS); 6194ef27340cSAdrian Chadd ATH_LOCK(sc); 6195ef27340cSAdrian Chadd } 6196ef27340cSAdrian Chadd 61972e986da5SSam Leffler callout_schedule(&sc->sc_wd_ch, hz); 6198c42a7b7eSSam Leffler } 6199c42a7b7eSSam Leffler 6200a585a9a1SSam Leffler #ifdef ATH_DIAGAPI 6201c42a7b7eSSam Leffler /* 6202c42a7b7eSSam Leffler * Diagnostic interface to the HAL. This is used by various 6203c42a7b7eSSam Leffler * tools to do things like retrieve register contents for 6204c42a7b7eSSam Leffler * debugging. The mechanism is intentionally opaque so that 6205c42a7b7eSSam Leffler * it can change frequently w/o concern for compatibility. 6206c42a7b7eSSam Leffler */ 6207c42a7b7eSSam Leffler static int 6208c42a7b7eSSam Leffler ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 6209c42a7b7eSSam Leffler { 6210c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 6211c42a7b7eSSam Leffler u_int id = ad->ad_id & ATH_DIAG_ID; 6212c42a7b7eSSam Leffler void *indata = NULL; 6213c42a7b7eSSam Leffler void *outdata = NULL; 6214c42a7b7eSSam Leffler u_int32_t insize = ad->ad_in_size; 6215c42a7b7eSSam Leffler u_int32_t outsize = ad->ad_out_size; 6216c42a7b7eSSam Leffler int error = 0; 6217c42a7b7eSSam Leffler 6218c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_IN) { 6219c42a7b7eSSam Leffler /* 6220c42a7b7eSSam Leffler * Copy in data.
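 * ad_in_size bytes are copied from the user pointer ad_in_data into a
 * temporary M_TEMP buffer; it is released at the "bad" label below,
 * which is reached on both the success and failure paths.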
6221c42a7b7eSSam Leffler */ 6222c42a7b7eSSam Leffler indata = malloc(insize, M_TEMP, M_NOWAIT); 6223c42a7b7eSSam Leffler if (indata == NULL) { 6224c42a7b7eSSam Leffler error = ENOMEM; 6225c42a7b7eSSam Leffler goto bad; 6226c42a7b7eSSam Leffler } 6227c42a7b7eSSam Leffler error = copyin(ad->ad_in_data, indata, insize); 6228c42a7b7eSSam Leffler if (error) 6229c42a7b7eSSam Leffler goto bad; 6230c42a7b7eSSam Leffler } 6231c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_DYN) { 6232c42a7b7eSSam Leffler /* 6233c42a7b7eSSam Leffler * Allocate a buffer for the results (otherwise the HAL 6234c42a7b7eSSam Leffler * returns a pointer to a buffer where we can read the 6235c42a7b7eSSam Leffler * results). Note that we depend on the HAL leaving this 6236c42a7b7eSSam Leffler * pointer for us to use below in reclaiming the buffer; 6237c42a7b7eSSam Leffler * may want to be more defensive. 6238c42a7b7eSSam Leffler */ 6239c42a7b7eSSam Leffler outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6240c42a7b7eSSam Leffler if (outdata == NULL) { 6241c42a7b7eSSam Leffler error = ENOMEM; 6242c42a7b7eSSam Leffler goto bad; 6243c42a7b7eSSam Leffler } 6244c42a7b7eSSam Leffler } 6245c42a7b7eSSam Leffler if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6246c42a7b7eSSam Leffler if (outsize < ad->ad_out_size) 6247c42a7b7eSSam Leffler ad->ad_out_size = outsize; 6248c42a7b7eSSam Leffler if (outdata != NULL) 6249c42a7b7eSSam Leffler error = copyout(outdata, ad->ad_out_data, 6250c42a7b7eSSam Leffler ad->ad_out_size); 6251c42a7b7eSSam Leffler } else { 6252c42a7b7eSSam Leffler error = EINVAL; 6253c42a7b7eSSam Leffler } 6254c42a7b7eSSam Leffler bad: 6255c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6256c42a7b7eSSam Leffler free(indata, M_TEMP); 6257c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6258c42a7b7eSSam Leffler free(outdata, M_TEMP); 6259c42a7b7eSSam Leffler return error; 6260c42a7b7eSSam Leffler } 6261a585a9a1SSam Leffler #endif /* ATH_DIAGAPI */ 6262c42a7b7eSSam Leffler 6263c42a7b7eSSam Leffler static int 6264c42a7b7eSSam Leffler ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6265c42a7b7eSSam Leffler { 6266c42a7b7eSSam Leffler #define IS_RUNNING(ifp) \ 626713f4c340SRobert Watson ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6268c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 6269b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 6270c42a7b7eSSam Leffler struct ifreq *ifr = (struct ifreq *)data; 627184784be1SSam Leffler const HAL_RATE_TABLE *rt; 6272c42a7b7eSSam Leffler int error = 0; 6273c42a7b7eSSam Leffler 6274c42a7b7eSSam Leffler switch (cmd) { 6275c42a7b7eSSam Leffler case SIOCSIFFLAGS: 627631a8c1edSAndrew Thompson ATH_LOCK(sc); 6277c42a7b7eSSam Leffler if (IS_RUNNING(ifp)) { 6278c42a7b7eSSam Leffler /* 6279c42a7b7eSSam Leffler * To avoid rescanning another access point, 6280c42a7b7eSSam Leffler * do not call ath_init() here. Instead, 6281c42a7b7eSSam Leffler * only reflect promisc mode settings. 6282c42a7b7eSSam Leffler */ 6283c42a7b7eSSam Leffler ath_mode_init(sc); 6284c42a7b7eSSam Leffler } else if (ifp->if_flags & IFF_UP) { 6285c42a7b7eSSam Leffler /* 6286c42a7b7eSSam Leffler * Beware of being called during attach/detach 6287c42a7b7eSSam Leffler * to reset promiscuous mode. In that case we 6288c42a7b7eSSam Leffler * will still be marked UP but not RUNNING. 
6289c42a7b7eSSam Leffler * However trying to re-init the interface 6290c42a7b7eSSam Leffler * is the wrong thing to do as we've already 6291c42a7b7eSSam Leffler * torn down much of our state. There's 6292c42a7b7eSSam Leffler * probably a better way to deal with this. 6293c42a7b7eSSam Leffler */ 6294b032f27cSSam Leffler if (!sc->sc_invalid) 6295fc74a9f9SBrooks Davis ath_init(sc); /* XXX lose error */ 6296d3ac945bSSam Leffler } else { 6297c42a7b7eSSam Leffler ath_stop_locked(ifp); 6298d3ac945bSSam Leffler #ifdef notyet 6299d3ac945bSSam Leffler /* XXX must wakeup in places like ath_vap_delete */ 6300d3ac945bSSam Leffler if (!sc->sc_invalid) 6301d3ac945bSSam Leffler ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 6302d3ac945bSSam Leffler #endif 6303d3ac945bSSam Leffler } 630431a8c1edSAndrew Thompson ATH_UNLOCK(sc); 6305c42a7b7eSSam Leffler break; 6306b032f27cSSam Leffler case SIOCGIFMEDIA: 6307b032f27cSSam Leffler case SIOCSIFMEDIA: 6308b032f27cSSam Leffler error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6309b032f27cSSam Leffler break; 6310c42a7b7eSSam Leffler case SIOCGATHSTATS: 6311c42a7b7eSSam Leffler /* NB: embed these numbers to get a consistent view */ 6312c42a7b7eSSam Leffler sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6313c42a7b7eSSam Leffler sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 631484784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 631584784be1SSam Leffler sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6316584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 631710ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 631810ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 631910ad9a77SSam Leffler #endif 632084784be1SSam Leffler rt = sc->sc_currates; 632146d4d74cSSam Leffler sc->sc_stats.ast_tx_rate = 632246d4d74cSSam Leffler rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 63236aa113fdSAdrian Chadd if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 63246aa113fdSAdrian Chadd sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6325c42a7b7eSSam Leffler return copyout(&sc->sc_stats, 6326c42a7b7eSSam Leffler ifr->ifr_data, sizeof (sc->sc_stats)); 63273fc21fedSSam Leffler case SIOCZATHSTATS: 63283fc21fedSSam Leffler error = priv_check(curthread, PRIV_DRIVER); 63293fc21fedSSam Leffler if (error == 0) 63303fc21fedSSam Leffler memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 63313fc21fedSSam Leffler break; 6332a585a9a1SSam Leffler #ifdef ATH_DIAGAPI 6333c42a7b7eSSam Leffler case SIOCGATHDIAG: 6334c42a7b7eSSam Leffler error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6335c42a7b7eSSam Leffler break; 6336f51c84eaSAdrian Chadd case SIOCGATHPHYERR: 6337f51c84eaSAdrian Chadd error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6338f51c84eaSAdrian Chadd break; 6339a585a9a1SSam Leffler #endif 634031a8c1edSAndrew Thompson case SIOCGIFADDR: 6341b032f27cSSam Leffler error = ether_ioctl(ifp, cmd, data); 6342c42a7b7eSSam Leffler break; 634331a8c1edSAndrew Thompson default: 634431a8c1edSAndrew Thompson error = EINVAL; 634531a8c1edSAndrew Thompson break; 6346c42a7b7eSSam Leffler } 6347c42a7b7eSSam Leffler return error; 6348a614e076SSam Leffler #undef IS_RUNNING 6349c42a7b7eSSam Leffler } 6350c42a7b7eSSam Leffler 6351c42a7b7eSSam Leffler /* 6352c42a7b7eSSam Leffler * Announce various information on device/driver attach. 
6353c42a7b7eSSam Leffler */ 6354c42a7b7eSSam Leffler static void 6355c42a7b7eSSam Leffler ath_announce(struct ath_softc *sc) 6356c42a7b7eSSam Leffler { 6357fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 6358c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 6359c42a7b7eSSam Leffler 6360498657cfSSam Leffler if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6361498657cfSSam Leffler ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6362498657cfSSam Leffler ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 636346a924c4SAdrian Chadd if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 636446a924c4SAdrian Chadd ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6365c42a7b7eSSam Leffler if (bootverbose) { 6366c42a7b7eSSam Leffler int i; 6367c42a7b7eSSam Leffler for (i = 0; i <= WME_AC_VO; i++) { 6368c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[i]; 6369c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for %s traffic\n", 6370c42a7b7eSSam Leffler txq->axq_qnum, ieee80211_wme_acnames[i]); 6371c42a7b7eSSam Leffler } 6372c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6373c42a7b7eSSam Leffler sc->sc_cabq->axq_qnum); 6374c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6375c42a7b7eSSam Leffler } 6376e2d787faSSam Leffler if (ath_rxbuf != ATH_RXBUF) 6377e2d787faSSam Leffler if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6378e2d787faSSam Leffler if (ath_txbuf != ATH_TXBUF) 6379e2d787faSSam Leffler if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 63809ac01d39SRui Paulo if (sc->sc_mcastkey && bootverbose) 63819ac01d39SRui Paulo if_printf(ifp, "using multicast key search\n"); 6382c42a7b7eSSam Leffler } 638310ad9a77SSam Leffler 6384584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 638510ad9a77SSam Leffler static void 638610ad9a77SSam Leffler ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) 638710ad9a77SSam Leffler { 638810ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 638910ad9a77SSam Leffler HAL_BEACON_TIMERS bt; 639010ad9a77SSam Leffler 639110ad9a77SSam Leffler bt.bt_intval = bintval | HAL_BEACON_ENA; 639210ad9a77SSam Leffler bt.bt_nexttbtt = nexttbtt; 639310ad9a77SSam Leffler bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; 639410ad9a77SSam Leffler bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; 639510ad9a77SSam Leffler bt.bt_nextatim = nexttbtt+1; 6396f3fb1687SAdrian Chadd /* Enables TBTT, DBA, SWBA timers by default */ 6397f3fb1687SAdrian Chadd bt.bt_flags = 0; 639810ad9a77SSam Leffler ath_hal_beaconsettimers(ah, &bt); 639910ad9a77SSam Leffler } 640010ad9a77SSam Leffler 640110ad9a77SSam Leffler /* 640210ad9a77SSam Leffler * Calculate the beacon interval. This is periodic in the 640310ad9a77SSam Leffler * superframe for the bss. We assume each station is configured 640410ad9a77SSam Leffler * identically wrt transmit rate so the guard time we calculate 640510ad9a77SSam Leffler * above will be the same on all stations. Note we need to 640610ad9a77SSam Leffler * factor in the xmit time because the hardware will schedule 640710ad9a77SSam Leffler * a frame for transmit if the start of the frame is within 640810ad9a77SSam Leffler * the burst time. When we get hardware that properly kills 640910ad9a77SSam Leffler * frames in the PCU we can reduce/eliminate the guard time. 641010ad9a77SSam Leffler * 641110ad9a77SSam Leffler * Roundup to 1024 is so we have 1 TU buffer in the guard time 641210ad9a77SSam Leffler * to deal with the granularity of the nexttbtt timer. 
11n MAC's 641310ad9a77SSam Leffler * with 1us timer granularity should allow us to reduce/eliminate 641410ad9a77SSam Leffler * this. 641510ad9a77SSam Leffler */ 641610ad9a77SSam Leffler static void 641710ad9a77SSam Leffler ath_tdma_bintvalsetup(struct ath_softc *sc, 641810ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma) 641910ad9a77SSam Leffler { 642010ad9a77SSam Leffler /* copy from vap state (XXX check all vaps have same value?) */ 642110ad9a77SSam Leffler sc->sc_tdmaslotlen = tdma->tdma_slotlen; 642210ad9a77SSam Leffler 642310ad9a77SSam Leffler sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 642410ad9a77SSam Leffler tdma->tdma_slotcnt, 1024); 642510ad9a77SSam Leffler sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 642610ad9a77SSam Leffler if (sc->sc_tdmabintval & 1) 642710ad9a77SSam Leffler sc->sc_tdmabintval++; 642810ad9a77SSam Leffler 642910ad9a77SSam Leffler if (tdma->tdma_slot == 0) { 643010ad9a77SSam Leffler /* 643110ad9a77SSam Leffler * Only slot 0 beacons; other slots respond. 643210ad9a77SSam Leffler */ 643310ad9a77SSam Leffler sc->sc_imask |= HAL_INT_SWBA; 643410ad9a77SSam Leffler sc->sc_tdmaswba = 0; /* beacon immediately */ 643510ad9a77SSam Leffler } else { 643610ad9a77SSam Leffler /* XXX all vaps must be slot 0 or slot !0 */ 643710ad9a77SSam Leffler sc->sc_imask &= ~HAL_INT_SWBA; 643810ad9a77SSam Leffler } 643910ad9a77SSam Leffler } 644010ad9a77SSam Leffler 644110ad9a77SSam Leffler /* 644210ad9a77SSam Leffler * Max 802.11 overhead. This assumes no 4-address frames and 644310ad9a77SSam Leffler * the encapsulation done by ieee80211_encap (llc). We also 644410ad9a77SSam Leffler * include potential crypto overhead. 644510ad9a77SSam Leffler */ 644610ad9a77SSam Leffler #define IEEE80211_MAXOVERHEAD \ 644710ad9a77SSam Leffler (sizeof(struct ieee80211_qosframe) \ 644810ad9a77SSam Leffler + sizeof(struct llc) \ 644910ad9a77SSam Leffler + IEEE80211_ADDR_LEN \ 645010ad9a77SSam Leffler + IEEE80211_WEP_IVLEN \ 645110ad9a77SSam Leffler + IEEE80211_WEP_KIDLEN \ 645210ad9a77SSam Leffler + IEEE80211_WEP_CRCLEN \ 645310ad9a77SSam Leffler + IEEE80211_WEP_MICLEN \ 645410ad9a77SSam Leffler + IEEE80211_CRC_LEN) 645510ad9a77SSam Leffler 645610ad9a77SSam Leffler /* 645710ad9a77SSam Leffler * Setup initially for tdma operation. Start the beacon 645810ad9a77SSam Leffler * timers and enable SWBA if we are slot 0. Otherwise 645910ad9a77SSam Leffler * we wait for slot 0 to arrive so we can sync up before 646010ad9a77SSam Leffler * starting to transmit. 
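 * As a purely illustrative example of the superframe length computed
 * by ath_tdma_bintvalsetup() (the numbers are made up): a 10000us slot
 * with a 1000us guard and 2 slots gives
 * roundup((10000 + 1000) * 2, 1024) >> 10 = 22 TU.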
646110ad9a77SSam Leffler */ 646210ad9a77SSam Leffler static void 646310ad9a77SSam Leffler ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) 646410ad9a77SSam Leffler { 646510ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 646610ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 646710ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 646810ad9a77SSam Leffler const struct ieee80211_txparam *tp; 646910ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma = NULL; 647010ad9a77SSam Leffler int rix; 647110ad9a77SSam Leffler 647210ad9a77SSam Leffler if (vap == NULL) { 647310ad9a77SSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 647410ad9a77SSam Leffler if (vap == NULL) { 647510ad9a77SSam Leffler if_printf(ifp, "%s: no vaps?\n", __func__); 647610ad9a77SSam Leffler return; 647710ad9a77SSam Leffler } 647810ad9a77SSam Leffler } 647980767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 648010ad9a77SSam Leffler tp = vap->iv_bss->ni_txparms; 648110ad9a77SSam Leffler /* 648210ad9a77SSam Leffler * Calculate the guard time for each slot. This is the 648310ad9a77SSam Leffler * time to send a maximal-size frame according to the 648410ad9a77SSam Leffler * fixed/lowest transmit rate. Note that the interface 648510ad9a77SSam Leffler * mtu does not include the 802.11 overhead so we must 648610ad9a77SSam Leffler * tack that on (ath_hal_computetxtime includes the 648710ad9a77SSam Leffler * preamble and plcp in its calculation). 648810ad9a77SSam Leffler */ 648910ad9a77SSam Leffler tdma = vap->iv_tdma; 649010ad9a77SSam Leffler if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 6491ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->ucastrate); 649210ad9a77SSam Leffler else 6493ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->mcastrate); 649410ad9a77SSam Leffler /* XXX short preamble assumed */ 649510ad9a77SSam Leffler sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates, 649610ad9a77SSam Leffler ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE); 649710ad9a77SSam Leffler 649810ad9a77SSam Leffler ath_hal_intrset(ah, 0); 649910ad9a77SSam Leffler 650010ad9a77SSam Leffler ath_beaconq_config(sc); /* setup h/w beacon q */ 65019c859a04SSam Leffler if (sc->sc_setcca) 650210ad9a77SSam Leffler ath_hal_setcca(ah, AH_FALSE); /* disable CCA */ 650310ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */ 650410ad9a77SSam Leffler ath_tdma_settimers(sc, sc->sc_tdmabintval, 650510ad9a77SSam Leffler sc->sc_tdmabintval | HAL_BEACON_RESET_TSF); 650610ad9a77SSam Leffler sc->sc_syncbeacon = 0; 650710ad9a77SSam Leffler 650810ad9a77SSam Leffler sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER; 650910ad9a77SSam Leffler sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER; 651010ad9a77SSam Leffler 651110ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 651210ad9a77SSam Leffler 651310ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u " 651410ad9a77SSam Leffler "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__, 651510ad9a77SSam Leffler tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt, 651610ad9a77SSam Leffler tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval, 651710ad9a77SSam Leffler sc->sc_tdmadbaprep); 651810ad9a77SSam Leffler } 651910ad9a77SSam Leffler 652010ad9a77SSam Leffler /* 652110ad9a77SSam Leffler * Update tdma operation. Called from the 802.11 layer 652210ad9a77SSam Leffler * when a beacon is received from the TDMA station operating 652310ad9a77SSam Leffler * in the slot immediately preceding us in the bss.
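 * The rx descriptor carries only a truncated TSF value, so the receive
 * timestamp is first extended to a full 64-bit TSF against the current
 * hardware TSF (see the ath_extend_tsf() call below) before any
 * arithmetic is done on it.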
Use 652410ad9a77SSam Leffler * the rx timestamp for the beacon frame to update our 652510ad9a77SSam Leffler * beacon timers so we follow their schedule. Note that 652610ad9a77SSam Leffler * by using the rx timestamp we implicitly include the 652710ad9a77SSam Leffler * propagation delay in our schedule. 652810ad9a77SSam Leffler */ 652910ad9a77SSam Leffler static void 653010ad9a77SSam Leffler ath_tdma_update(struct ieee80211_node *ni, 65312bc3ce77SSam Leffler const struct ieee80211_tdma_param *tdma, int changed) 653210ad9a77SSam Leffler { 653310ad9a77SSam Leffler #define TSF_TO_TU(_h,_l) \ 653410ad9a77SSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 653510ad9a77SSam Leffler #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 653610ad9a77SSam Leffler struct ieee80211vap *vap = ni->ni_vap; 653710ad9a77SSam Leffler struct ieee80211com *ic = ni->ni_ic; 653810ad9a77SSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 653910ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 654010ad9a77SSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 6541fc4de9b7SAdrian Chadd u_int64_t tsf, rstamp, nextslot, nexttbtt; 6542fc4de9b7SAdrian Chadd u_int32_t txtime, nextslottu; 654310ad9a77SSam Leffler int32_t tudelta, tsfdelta; 654410ad9a77SSam Leffler const struct ath_rx_status *rs; 654510ad9a77SSam Leffler int rix; 654610ad9a77SSam Leffler 654710ad9a77SSam Leffler sc->sc_stats.ast_tdma_update++; 654810ad9a77SSam Leffler 654910ad9a77SSam Leffler /* 655010ad9a77SSam Leffler * Check for and adopt configuration changes. 655110ad9a77SSam Leffler */ 65522bc3ce77SSam Leffler if (changed != 0) { 655310ad9a77SSam Leffler const struct ieee80211_tdma_state *ts = vap->iv_tdma; 655410ad9a77SSam Leffler 655510ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, ts); 6556040972a1SSam Leffler if (changed & TDMA_UPDATE_SLOTLEN) 6557040972a1SSam Leffler ath_wme_update(ic); 655810ad9a77SSam Leffler 655910ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, 656010ad9a77SSam Leffler "%s: adopt slot %u slotcnt %u slotlen %u us " 656110ad9a77SSam Leffler "bintval %u TU\n", __func__, 656210ad9a77SSam Leffler ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 656310ad9a77SSam Leffler sc->sc_tdmabintval); 656410ad9a77SSam Leffler 656510ad9a77SSam Leffler /* XXX right? */ 656610ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 656710ad9a77SSam Leffler /* NB: beacon timers programmed below */ 656810ad9a77SSam Leffler } 656910ad9a77SSam Leffler 657010ad9a77SSam Leffler /* extend rx timestamp to 64 bits */ 65715463c4a4SSam Leffler rs = sc->sc_lastrs; 657210ad9a77SSam Leffler tsf = ath_hal_gettsf64(ah); 6573fc4de9b7SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 657410ad9a77SSam Leffler /* 657510ad9a77SSam Leffler * The rx timestamp is set by the hardware on completing 657610ad9a77SSam Leffler * reception (at the point where the rx descriptor is DMA'd 657710ad9a77SSam Leffler * to the host). To find the start of our next slot we 657810ad9a77SSam Leffler * must adjust this time by the time required to send 657910ad9a77SSam Leffler * the packet just received. 
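 * All TSF values handled here are in microseconds and a TU is 1024us,
 * so TSF_TO_TU() above is just a 64-bit right shift by 10 written out
 * on the two 32-bit halves and TU_TO_TSF() is the inverse.  The
 * (sc_tdmabintval << 9) term added below is that TU superframe length
 * converted to microseconds (<< 10) and halved in the same shift.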
658010ad9a77SSam Leffler */ 658110ad9a77SSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate]; 658210ad9a77SSam Leffler txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix, 658310ad9a77SSam Leffler rt->info[rix].shortPreamble); 658410ad9a77SSam Leffler /* NB: << 9 is to cvt to TU and /2 */ 658510ad9a77SSam Leffler nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); 658610ad9a77SSam Leffler nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD; 658710ad9a77SSam Leffler 658810ad9a77SSam Leffler /* 6589fc4de9b7SAdrian Chadd * Retrieve the hardware NextTBTT in usecs 6590fc4de9b7SAdrian Chadd * and calculate the difference between what the 659110ad9a77SSam Leffler * other station thinks and what we have programmed. This 659210ad9a77SSam Leffler * lets us figure out how to adjust our timers to match. The 659310ad9a77SSam Leffler * adjustments are done by pulling the TSF forward and possibly 659410ad9a77SSam Leffler * rewriting the beacon timers. 659510ad9a77SSam Leffler */ 6596fc4de9b7SAdrian Chadd nexttbtt = ath_hal_getnexttbtt(ah); 6597fc4de9b7SAdrian Chadd tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt); 659810ad9a77SSam Leffler 659910ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 660010ad9a77SSam Leffler "tsfdelta %d avg +%d/-%d\n", tsfdelta, 660110ad9a77SSam Leffler TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam)); 660210ad9a77SSam Leffler 660310ad9a77SSam Leffler if (tsfdelta < 0) { 660410ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 660510ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta); 660610ad9a77SSam Leffler tsfdelta = -tsfdelta % 1024; 660710ad9a77SSam Leffler nextslottu++; 660810ad9a77SSam Leffler } else if (tsfdelta > 0) { 660910ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta); 661010ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 661110ad9a77SSam Leffler tsfdelta = 1024 - (tsfdelta % 1024); 661210ad9a77SSam Leffler nextslottu++; 661310ad9a77SSam Leffler } else { 661410ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 661510ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 661610ad9a77SSam Leffler } 6617fc4de9b7SAdrian Chadd tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt); 661810ad9a77SSam Leffler 661910ad9a77SSam Leffler /* 662010ad9a77SSam Leffler * Copy sender's timestamp into tdma ie so they can 662110ad9a77SSam Leffler * calculate roundtrip time. We submit a beacon frame 662210ad9a77SSam Leffler * below after any timer adjustment. The frame goes out 662310ad9a77SSam Leffler * at the next TBTT so the sender can calculate the 662410ad9a77SSam Leffler * roundtrip by inspecting the tdma ie in our beacon frame. 662510ad9a77SSam Leffler * 662610ad9a77SSam Leffler * NB: This tstamp is subtly preserved when 662710ad9a77SSam Leffler * IEEE80211_BEACON_TDMA is marked (e.g. when the 662810ad9a77SSam Leffler * slot position changes) because ieee80211_add_tdma 662910ad9a77SSam Leffler * skips over the data.
663010ad9a77SSam Leffler */ 663110ad9a77SSam Leffler memcpy(ATH_VAP(vap)->av_boff.bo_tdma + 663210ad9a77SSam Leffler __offsetof(struct ieee80211_tdma_param, tdma_tstamp), 663310ad9a77SSam Leffler &ni->ni_tstamp.data, 8); 663410ad9a77SSam Leffler #if 0 663510ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 6636fc4de9b7SAdrian Chadd "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", 663710ad9a77SSam Leffler (unsigned long long) tsf, (unsigned long long) nextslot, 6638fc4de9b7SAdrian Chadd (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); 663910ad9a77SSam Leffler #endif 664010ad9a77SSam Leffler /* 664110ad9a77SSam Leffler * Adjust the beacon timers only when pulling them forward 664210ad9a77SSam Leffler * or when going back by less than the beacon interval. 664310ad9a77SSam Leffler * Negative jumps larger than the beacon interval seem to 664410ad9a77SSam Leffler * cause the timers to stop and generally cause instability. 664510ad9a77SSam Leffler * This basically filters out jumps due to missed beacons. 664610ad9a77SSam Leffler */ 664710ad9a77SSam Leffler if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { 664810ad9a77SSam Leffler ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); 664910ad9a77SSam Leffler sc->sc_stats.ast_tdma_timers++; 665010ad9a77SSam Leffler } 665110ad9a77SSam Leffler if (tsfdelta > 0) { 665210ad9a77SSam Leffler ath_hal_adjusttsf(ah, tsfdelta); 665310ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsf++; 665410ad9a77SSam Leffler } 665510ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); /* prepare response */ 665610ad9a77SSam Leffler #undef TU_TO_TSF 665710ad9a77SSam Leffler #undef TSF_TO_TU 665810ad9a77SSam Leffler } 665910ad9a77SSam Leffler 666010ad9a77SSam Leffler /* 666110ad9a77SSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates 666210ad9a77SSam Leffler * to the frame contents are done as needed. 666310ad9a77SSam Leffler */ 666410ad9a77SSam Leffler static void 666510ad9a77SSam Leffler ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) 666610ad9a77SSam Leffler { 666710ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 666810ad9a77SSam Leffler struct ath_buf *bf; 666910ad9a77SSam Leffler int otherant; 667010ad9a77SSam Leffler 667110ad9a77SSam Leffler /* 667210ad9a77SSam Leffler * Check if the previous beacon has gone out. If 667310ad9a77SSam Leffler * not don't try to post another, skip this period 667410ad9a77SSam Leffler * and wait for the next. Missed beacons indicate 667510ad9a77SSam Leffler * a problem and should not occur. If we miss too 667610ad9a77SSam Leffler * many consecutive beacons reset the device. 
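 * The reset is not done directly here; once sc_bmisscount reaches
 * ath_bstuck_threshold the bstuck task is scheduled below to handle it.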
667710ad9a77SSam Leffler */ 667810ad9a77SSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 667910ad9a77SSam Leffler sc->sc_bmisscount++; 668010ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 668110ad9a77SSam Leffler "%s: missed %u consecutive beacons\n", 668210ad9a77SSam Leffler __func__, sc->sc_bmisscount); 6683a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold) 668410ad9a77SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 668510ad9a77SSam Leffler return; 668610ad9a77SSam Leffler } 668710ad9a77SSam Leffler if (sc->sc_bmisscount != 0) { 668810ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 668910ad9a77SSam Leffler "%s: resume beacon xmit after %u misses\n", 669010ad9a77SSam Leffler __func__, sc->sc_bmisscount); 669110ad9a77SSam Leffler sc->sc_bmisscount = 0; 669210ad9a77SSam Leffler } 669310ad9a77SSam Leffler 669410ad9a77SSam Leffler /* 669510ad9a77SSam Leffler * Check recent per-antenna transmit statistics and flip 669610ad9a77SSam Leffler * the default antenna if noticeably more frames went out 669710ad9a77SSam Leffler * on the non-default antenna. 669810ad9a77SSam Leffler * XXX assumes 2 antennae 669910ad9a77SSam Leffler */ 670010ad9a77SSam Leffler if (!sc->sc_diversity) { 670110ad9a77SSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1; 670210ad9a77SSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 670310ad9a77SSam Leffler ath_setdefantenna(sc, otherant); 670410ad9a77SSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 670510ad9a77SSam Leffler } 670610ad9a77SSam Leffler 670710ad9a77SSam Leffler bf = ath_beacon_generate(sc, vap); 670810ad9a77SSam Leffler if (bf != NULL) { 670910ad9a77SSam Leffler /* 671010ad9a77SSam Leffler * Stop any current dma and put the new frame on the queue. 671110ad9a77SSam Leffler * This should never fail since we check above that no frames 671210ad9a77SSam Leffler * are still pending on the queue. 671310ad9a77SSam Leffler */ 671410ad9a77SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 671510ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 671610ad9a77SSam Leffler "%s: beacon queue %u did not stop?\n", 671710ad9a77SSam Leffler __func__, sc->sc_bhalq); 671810ad9a77SSam Leffler /* NB: the HAL still stops DMA, so proceed */ 671910ad9a77SSam Leffler } 672010ad9a77SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 672110ad9a77SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 672210ad9a77SSam Leffler 672310ad9a77SSam Leffler sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ 672410ad9a77SSam Leffler 672510ad9a77SSam Leffler /* 672610ad9a77SSam Leffler * Record local TSF for our last send for use 672710ad9a77SSam Leffler * in arbitrating slot collisions.
672810ad9a77SSam Leffler */ 672980767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 673010ad9a77SSam Leffler vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 673110ad9a77SSam Leffler } 673210ad9a77SSam Leffler } 6733584f7327SSam Leffler #endif /* IEEE80211_SUPPORT_TDMA */ 6734e8dabfbeSAdrian Chadd 673548237774SAdrian Chadd static void 673648237774SAdrian Chadd ath_dfs_tasklet(void *p, int npending) 673748237774SAdrian Chadd { 673848237774SAdrian Chadd struct ath_softc *sc = (struct ath_softc *) p; 673948237774SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 674048237774SAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 674148237774SAdrian Chadd 674248237774SAdrian Chadd /* 674348237774SAdrian Chadd * If previous processing has found a radar event, 674448237774SAdrian Chadd * signal this to the net80211 layer to begin DFS 674548237774SAdrian Chadd * processing. 674648237774SAdrian Chadd */ 674748237774SAdrian Chadd if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 674848237774SAdrian Chadd /* DFS event found, initiate channel change */ 674906fc4a10SAdrian Chadd /* 675006fc4a10SAdrian Chadd * XXX doesn't currently tell us whether the event 675106fc4a10SAdrian Chadd * XXX was found in the primary or extension 675206fc4a10SAdrian Chadd * XXX channel! 675306fc4a10SAdrian Chadd */ 675406fc4a10SAdrian Chadd IEEE80211_LOCK(ic); 675548237774SAdrian Chadd ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 675606fc4a10SAdrian Chadd IEEE80211_UNLOCK(ic); 675748237774SAdrian Chadd } 675848237774SAdrian Chadd } 675948237774SAdrian Chadd 6760dba9c859SAdrian Chadd MODULE_VERSION(if_ath, 1); 6761dba9c859SAdrian Chadd MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6762