/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		const uint8_t [IEEE80211_ADDR_LEN],
		const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_vap_delete(struct ieee80211vap *);
static void ath_init(void *);
static void ath_stop_locked(struct ifnet *);
static void ath_stop(struct ifnet *);
static void ath_start(struct ifnet *);
static int ath_reset_vap(struct ieee80211vap *, u_long);
static int ath_media_change(struct ifnet *);
static void ath_watchdog(void *);
static int ath_ioctl(struct ifnet *, u_long, caddr_t);
static void ath_fatal_proc(void *, int);
static void ath_bmiss_vap(struct ieee80211vap *);
static void ath_bmiss_proc(void *, int);
static void ath_key_update_begin(struct ieee80211vap *);
static void ath_key_update_end(struct ieee80211vap *);
static void ath_update_mcast(struct ifnet *);
static void ath_update_promisc(struct ifnet *);
static void ath_mode_init(struct ath_softc *);
static void ath_setslottime(struct ath_softc *);
static void ath_updateslot(struct ifnet *);
static int ath_beaconq_setup(struct ath_hal *);
static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void ath_beacon_update(struct ieee80211vap *, int item);
static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
		struct ieee80211vap *);
static void ath_bstuck_proc(void *, int);
static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void ath_beacon_free(struct ath_softc *);
static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
static void ath_descdma_cleanup(struct ath_softc *sc,
		struct ath_descdma *, ath_bufhead *);
static int ath_desc_alloc(struct ath_softc *);
static void ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
		const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_node_cleanup(struct ieee80211_node *);
static void ath_node_free(struct ieee80211_node *);
static void ath_node_getsignal(const struct ieee80211_node *,
		int8_t *, int8_t *);
static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
		int subtype, int rssi, int nf);
static void ath_setdefantenna(struct ath_softc *, u_int);
static void ath_rx_proc(struct ath_softc *sc, int);
static void ath_rx_tasklet(void *, int);
static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int ath_tx_setup(struct ath_softc *, int, int);
static int ath_wme_update(struct ieee80211com *);
static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void ath_tx_cleanup(struct ath_softc *);
static void ath_tx_proc_q0(void *, int);
static void ath_tx_proc_q0123(void *, int);
static void ath_tx_proc(void *, int);
static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void ath_stoprecv(struct ath_softc *, int);
static int ath_startrecv(struct ath_softc *);
static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void ath_scan_start(struct ieee80211com *);
static void ath_scan_end(struct ieee80211com *);
static void ath_set_channel(struct ieee80211com *);
static void ath_calibrate(void *);
static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void ath_setup_stationkey(struct ieee80211_node *);
static void ath_newassoc(struct ieee80211_node *, int);
static int ath_setregdomain(struct ieee80211com *,
		struct ieee80211_regdomain *, int,
		struct ieee80211_channel []);
static void ath_getradiocaps(struct ieee80211com *, int, int *,
		struct ieee80211_channel []);
static int ath_getchannels(struct ath_softc *);

static int ath_rate_setup(struct ath_softc *, u_int mode);
static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void ath_announce(struct ath_softc *);

static void ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
		u_int32_t bintval);
static void ath_tdma_bintvalsetup(struct ath_softc *sc,
		const struct ieee80211_tdma_state *tdma);
static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void ath_tdma_update(struct ieee80211_node *ni,
		const struct ieee80211_tdma_param *tdma, int);
static void ath_tdma_beacon_send(struct ath_softc *sc,
		struct ieee80211vap *vap);

#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
	((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do { \
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	0, "ANI calibration (msecs)");

static int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break anything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			| IEEE80211_HTC_AMPDU		/* A-MPDU tx/rx */
			| IEEE80211_HTC_AMSDU		/* A-MSDU tx/rx */
			| IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			| IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */
			;

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	    0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	/* Set default parameters */

	/*
	 * Anything earlier than some of the AR9300 series MACs doesn't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
1073b032f27cSSam Leffler */
1074b032f27cSSam Leffler avp->av_bslot = assign_bslot(sc);
1075b032f27cSSam Leffler KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1076b032f27cSSam Leffler ("beacon slot %u not empty", avp->av_bslot));
1077b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = vap;
1078b032f27cSSam Leffler sc->sc_nbcnvaps++;
1079b032f27cSSam Leffler }
1080b032f27cSSam Leffler if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1081b032f27cSSam Leffler /*
1082b032f27cSSam Leffler * Multiple vaps are to transmit beacons and we
1083b032f27cSSam Leffler * have h/w support for TSF adjusting; enable
1084b032f27cSSam Leffler * use of staggered beacons.
1085b032f27cSSam Leffler */
1086b032f27cSSam Leffler sc->sc_stagbeacons = 1;
1087b032f27cSSam Leffler }
1088b032f27cSSam Leffler ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1089b032f27cSSam Leffler }
1090b032f27cSSam Leffler 
1091b032f27cSSam Leffler ic->ic_opmode = ic_opmode;
1092b032f27cSSam Leffler if (opmode != IEEE80211_M_WDS) {
1093b032f27cSSam Leffler sc->sc_nvaps++;
1094b032f27cSSam Leffler if (opmode == IEEE80211_M_STA)
1095b032f27cSSam Leffler sc->sc_nstavaps++;
1096fe0dd789SSam Leffler if (opmode == IEEE80211_M_MBSS)
1097fe0dd789SSam Leffler sc->sc_nmeshvaps++;
1098b032f27cSSam Leffler }
1099b032f27cSSam Leffler switch (ic_opmode) {
1100b032f27cSSam Leffler case IEEE80211_M_IBSS:
1101b032f27cSSam Leffler sc->sc_opmode = HAL_M_IBSS;
1102b032f27cSSam Leffler break;
1103b032f27cSSam Leffler case IEEE80211_M_STA:
1104b032f27cSSam Leffler sc->sc_opmode = HAL_M_STA;
1105b032f27cSSam Leffler break;
1106b032f27cSSam Leffler case IEEE80211_M_AHDEMO:
1107584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA
110810ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) {
110910ad9a77SSam Leffler sc->sc_tdma = 1;
111010ad9a77SSam Leffler /* NB: disable tsf adjust */
111110ad9a77SSam Leffler sc->sc_stagbeacons = 0;
111210ad9a77SSam Leffler }
111310ad9a77SSam Leffler /*
111410ad9a77SSam Leffler * NB: adhoc demo mode is a pseudo mode; to the hal it's
111510ad9a77SSam Leffler * just ap mode.
111610ad9a77SSam Leffler */
111710ad9a77SSam Leffler /* fall thru... */
111810ad9a77SSam Leffler #endif
1119b032f27cSSam Leffler case IEEE80211_M_HOSTAP:
112059aa14a9SRui Paulo case IEEE80211_M_MBSS:
1121b032f27cSSam Leffler sc->sc_opmode = HAL_M_HOSTAP;
1122b032f27cSSam Leffler break;
1123b032f27cSSam Leffler case IEEE80211_M_MONITOR:
1124b032f27cSSam Leffler sc->sc_opmode = HAL_M_MONITOR;
1125b032f27cSSam Leffler break;
1126b032f27cSSam Leffler default:
1127b032f27cSSam Leffler /* XXX should not happen */
1128b032f27cSSam Leffler break;
1129b032f27cSSam Leffler }
1130b032f27cSSam Leffler if (sc->sc_hastsfadd) {
1131b032f27cSSam Leffler /*
1132b032f27cSSam Leffler * Configure whether or not TSF adjust should be done.
1133b032f27cSSam Leffler */
1134b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1135b032f27cSSam Leffler }
113610ad9a77SSam Leffler if (flags & IEEE80211_CLONE_NOBEACONS) {
113710ad9a77SSam Leffler /*
113810ad9a77SSam Leffler * Enable s/w beacon miss handling.
113910ad9a77SSam Leffler */
114010ad9a77SSam Leffler sc->sc_swbmiss = 1;
114110ad9a77SSam Leffler }
1142b032f27cSSam Leffler ATH_UNLOCK(sc);
1143b032f27cSSam Leffler 
1144b032f27cSSam Leffler /* complete setup */
1145b032f27cSSam Leffler ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1146b032f27cSSam Leffler return vap;
1147b032f27cSSam Leffler bad2:
1148b032f27cSSam Leffler reclaim_address(sc, mac);
1149b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1150b032f27cSSam Leffler bad:
1151b032f27cSSam Leffler free(avp, M_80211_VAP);
1152b032f27cSSam Leffler ATH_UNLOCK(sc);
1153b032f27cSSam Leffler return NULL;
1154b032f27cSSam Leffler }
1155b032f27cSSam Leffler 
1156b032f27cSSam Leffler static void
1157b032f27cSSam Leffler ath_vap_delete(struct ieee80211vap *vap)
1158b032f27cSSam Leffler {
1159b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic;
1160b032f27cSSam Leffler struct ifnet *ifp = ic->ic_ifp;
1161b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc;
1162b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah;
1163b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap);
1164b032f27cSSam Leffler 
1165f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1166b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1167b032f27cSSam Leffler /*
1168b032f27cSSam Leffler * Quiesce the hardware while we remove the vap. In
1169b032f27cSSam Leffler * particular we need to reclaim all references to
1170b032f27cSSam Leffler * the vap state by any frames pending on the tx queues.
1171b032f27cSSam Leffler */
1172b032f27cSSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */
1173517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1174517526efSAdrian Chadd /* XXX Do all frames from all vaps/nodes need draining here? */
11759a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* stop recv side */
1176b032f27cSSam Leffler }
1177b032f27cSSam Leffler 
1178b032f27cSSam Leffler ieee80211_vap_detach(vap);
117916d4de92SAdrian Chadd 
118016d4de92SAdrian Chadd /*
118116d4de92SAdrian Chadd * XXX Danger Will Robinson! Danger!
118216d4de92SAdrian Chadd *
118316d4de92SAdrian Chadd * Because ieee80211_vap_detach() can queue a frame (the station
118416d4de92SAdrian Chadd * disassociate message?) after we've drained the TXQ and
118516d4de92SAdrian Chadd * flushed the software TXQ, we will end up with a frame queued
118616d4de92SAdrian Chadd * to a node whose vap is about to be freed.
118716d4de92SAdrian Chadd *
118816d4de92SAdrian Chadd * To work around this, flush the hardware/software again.
118916d4de92SAdrian Chadd * This may be racy - the ath task may be running and the packet
119016d4de92SAdrian Chadd * may be being scheduled between sw->hw txq. Tsk.
119116d4de92SAdrian Chadd *
119216d4de92SAdrian Chadd * TODO: figure out why a new node gets allocated somewhere around
119316d4de92SAdrian Chadd * here (after the ath_tx_swq() call; and after an ath_stop_locked()
119416d4de92SAdrian Chadd * call!)
119516d4de92SAdrian Chadd */
119616d4de92SAdrian Chadd 
119716d4de92SAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT);
1199b032f27cSSam Leffler ATH_LOCK(sc);
1200b032f27cSSam Leffler /*
1201b032f27cSSam Leffler * Reclaim beacon state. Note this must be done before
1202b032f27cSSam Leffler * the vap instance is reclaimed as we may have a reference
1203b032f27cSSam Leffler * to it in the buffer for the beacon frame.
1204b032f27cSSam Leffler */ 1205b032f27cSSam Leffler if (avp->av_bcbuf != NULL) { 1206b032f27cSSam Leffler if (avp->av_bslot != -1) { 1207b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = NULL; 1208b032f27cSSam Leffler sc->sc_nbcnvaps--; 1209b032f27cSSam Leffler } 1210b032f27cSSam Leffler ath_beacon_return(sc, avp->av_bcbuf); 1211b032f27cSSam Leffler avp->av_bcbuf = NULL; 1212b032f27cSSam Leffler if (sc->sc_nbcnvaps == 0) { 1213b032f27cSSam Leffler sc->sc_stagbeacons = 0; 1214b032f27cSSam Leffler if (sc->sc_hastsfadd) 1215b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, 0); 1216b032f27cSSam Leffler } 1217b032f27cSSam Leffler /* 1218b032f27cSSam Leffler * Reclaim any pending mcast frames for the vap. 1219b032f27cSSam Leffler */ 1220b032f27cSSam Leffler ath_tx_draintxq(sc, &avp->av_mcastq); 1221b032f27cSSam Leffler ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1222b032f27cSSam Leffler } 1223b032f27cSSam Leffler /* 1224b032f27cSSam Leffler * Update bookkeeping. 1225b032f27cSSam Leffler */ 1226b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_STA) { 1227b032f27cSSam Leffler sc->sc_nstavaps--; 1228b032f27cSSam Leffler if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1229b032f27cSSam Leffler sc->sc_swbmiss = 0; 123059aa14a9SRui Paulo } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 123159aa14a9SRui Paulo vap->iv_opmode == IEEE80211_M_MBSS) { 1232b032f27cSSam Leffler reclaim_address(sc, vap->iv_myaddr); 1233b032f27cSSam Leffler ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1234fe0dd789SSam Leffler if (vap->iv_opmode == IEEE80211_M_MBSS) 1235fe0dd789SSam Leffler sc->sc_nmeshvaps--; 1236b032f27cSSam Leffler } 1237b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_WDS) 1238b032f27cSSam Leffler sc->sc_nvaps--; 1239584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 124010ad9a77SSam Leffler /* TDMA operation ceases when the last vap is destroyed */ 124110ad9a77SSam Leffler if (sc->sc_tdma && sc->sc_nvaps == 0) { 124210ad9a77SSam Leffler sc->sc_tdma = 0; 124310ad9a77SSam Leffler sc->sc_swbmiss = 0; 124410ad9a77SSam Leffler } 124510ad9a77SSam Leffler #endif 1246b032f27cSSam Leffler free(avp, M_80211_VAP); 1247b032f27cSSam Leffler 1248b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1249b032f27cSSam Leffler /* 1250b032f27cSSam Leffler * Restart rx+tx machines if still running (RUNNING will 1251b032f27cSSam Leffler * be reset if we just destroyed the last vap). 
1252b032f27cSSam Leffler */ 1253b032f27cSSam Leffler if (ath_startrecv(sc) != 0) 1254b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 1255b032f27cSSam Leffler __func__); 1256c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 1257c89b957aSSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 1258c89b957aSSam Leffler if (sc->sc_tdma) 1259c89b957aSSam Leffler ath_tdma_config(sc, NULL); 1260c89b957aSSam Leffler else 1261c89b957aSSam Leffler #endif 1262b032f27cSSam Leffler ath_beacon_config(sc, NULL); 1263c89b957aSSam Leffler } 1264b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1265b032f27cSSam Leffler } 126616d4de92SAdrian Chadd ATH_UNLOCK(sc); 1267b032f27cSSam Leffler } 1268b032f27cSSam Leffler 12695591b213SSam Leffler void 12705591b213SSam Leffler ath_suspend(struct ath_softc *sc) 12715591b213SSam Leffler { 1272fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1273d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 12745591b213SSam Leffler 1275c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1276c42a7b7eSSam Leffler __func__, ifp->if_flags); 12775591b213SSam Leffler 1278d3ac945bSSam Leffler sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1279d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) 12805591b213SSam Leffler ath_stop(ifp); 1281d3ac945bSSam Leffler else 1282d3ac945bSSam Leffler ieee80211_suspend_all(ic); 1283d3ac945bSSam Leffler /* 1284d3ac945bSSam Leffler * NB: don't worry about putting the chip in low power 1285d3ac945bSSam Leffler * mode; pci will power off our socket on suspend and 1286f29b8b7fSWarner Losh * CardBus detaches the device. 1287d3ac945bSSam Leffler */ 1288d3ac945bSSam Leffler } 1289d3ac945bSSam Leffler 1290d3ac945bSSam Leffler /* 1291d3ac945bSSam Leffler * Reset the key cache since some parts do not reset the 1292d3ac945bSSam Leffler * contents on resume. First we clear all entries, then 1293d3ac945bSSam Leffler * re-load keys that the 802.11 layer assumes are setup 1294d3ac945bSSam Leffler * in h/w. 1295d3ac945bSSam Leffler */ 1296d3ac945bSSam Leffler static void 1297d3ac945bSSam Leffler ath_reset_keycache(struct ath_softc *sc) 1298d3ac945bSSam Leffler { 1299d3ac945bSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1300d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1301d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1302d3ac945bSSam Leffler int i; 1303d3ac945bSSam Leffler 1304d3ac945bSSam Leffler for (i = 0; i < sc->sc_keymax; i++) 1305d3ac945bSSam Leffler ath_hal_keyreset(ah, i); 1306d3ac945bSSam Leffler ieee80211_crypto_reload_keys(ic); 13075591b213SSam Leffler } 13085591b213SSam Leffler 13095591b213SSam Leffler void 13105591b213SSam Leffler ath_resume(struct ath_softc *sc) 13115591b213SSam Leffler { 1312fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1313d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1314d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1315d3ac945bSSam Leffler HAL_STATUS status; 13165591b213SSam Leffler 1317c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1318c42a7b7eSSam Leffler __func__, ifp->if_flags); 13195591b213SSam Leffler 1320d3ac945bSSam Leffler /* 1321d3ac945bSSam Leffler * Must reset the chip before we reload the 1322d3ac945bSSam Leffler * keycache as we were powered down on suspend. 1323d3ac945bSSam Leffler */ 1324054d7b69SSam Leffler ath_hal_reset(ah, sc->sc_opmode, 1325054d7b69SSam Leffler sc->sc_curchan != NULL ? 
sc->sc_curchan : ic->ic_curchan, 1326054d7b69SSam Leffler AH_FALSE, &status); 1327d3ac945bSSam Leffler ath_reset_keycache(sc); 13287e5eb44dSAdrian Chadd 13297e5eb44dSAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 13307e5eb44dSAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 13317e5eb44dSAdrian Chadd 1332a497cd88SAdrian Chadd /* Restore the LED configuration */ 1333a497cd88SAdrian Chadd ath_led_config(sc); 1334a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_INIT); 1335a497cd88SAdrian Chadd 1336d3ac945bSSam Leffler if (sc->sc_resume_up) { 1337d3ac945bSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA) { 1338fc74a9f9SBrooks Davis ath_init(sc); 1339a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_RUN); 1340394f34a5SSam Leffler /* 1341394f34a5SSam Leffler * Program the beacon registers using the last rx'd 1342394f34a5SSam Leffler * beacon frame and enable sync on the next beacon 1343394f34a5SSam Leffler * we see. This should handle the case where we 1344394f34a5SSam Leffler * wakeup and find the same AP and also the case where 1345394f34a5SSam Leffler * we wakeup and need to roam. For the latter we 1346394f34a5SSam Leffler * should get bmiss events that trigger a roam. 1347394f34a5SSam Leffler */ 1348394f34a5SSam Leffler ath_beacon_config(sc, NULL); 1349394f34a5SSam Leffler sc->sc_syncbeacon = 1; 1350d3ac945bSSam Leffler } else 1351d3ac945bSSam Leffler ieee80211_resume_all(ic); 13525591b213SSam Leffler } 13532fd9aabbSAdrian Chadd 13542fd9aabbSAdrian Chadd /* XXX beacons ? */ 13556b59f5e3SSam Leffler } 13565591b213SSam Leffler 13575591b213SSam Leffler void 13585591b213SSam Leffler ath_shutdown(struct ath_softc *sc) 13595591b213SSam Leffler { 1360fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 13615591b213SSam Leffler 1362c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1363c42a7b7eSSam Leffler __func__, ifp->if_flags); 13645591b213SSam Leffler 13655591b213SSam Leffler ath_stop(ifp); 1366d3ac945bSSam Leffler /* NB: no point powering down chip as we're about to reboot */ 13675591b213SSam Leffler } 13685591b213SSam Leffler 1369c42a7b7eSSam Leffler /* 1370c42a7b7eSSam Leffler * Interrupt handler. Most of the actual processing is deferred. 1371c42a7b7eSSam Leffler */ 13725591b213SSam Leffler void 13735591b213SSam Leffler ath_intr(void *arg) 13745591b213SSam Leffler { 13755591b213SSam Leffler struct ath_softc *sc = arg; 1376fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 13775591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 13786f5fe81eSAdrian Chadd HAL_INT status = 0; 13798f939e79SAdrian Chadd uint32_t txqs; 13805591b213SSam Leffler 1381ef27340cSAdrian Chadd /* 1382ef27340cSAdrian Chadd * If we're inside a reset path, just print a warning and 1383ef27340cSAdrian Chadd * clear the ISR. The reset routine will finish it for us. 
1384ef27340cSAdrian Chadd */ 1385ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1386ef27340cSAdrian Chadd if (sc->sc_inreset_cnt) { 1387ef27340cSAdrian Chadd HAL_INT status; 1388ef27340cSAdrian Chadd ath_hal_getisr(ah, &status); /* clear ISR */ 1389ef27340cSAdrian Chadd ath_hal_intrset(ah, 0); /* disable further intr's */ 1390ef27340cSAdrian Chadd DPRINTF(sc, ATH_DEBUG_ANY, 1391ef27340cSAdrian Chadd "%s: in reset, ignoring: status=0x%x\n", 1392ef27340cSAdrian Chadd __func__, status); 1393ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1394ef27340cSAdrian Chadd return; 1395ef27340cSAdrian Chadd } 1396ef27340cSAdrian Chadd 13975591b213SSam Leffler if (sc->sc_invalid) { 13985591b213SSam Leffler /* 1399b58b3803SSam Leffler * The hardware is not ready/present, don't touch anything. 1400b58b3803SSam Leffler * Note this can happen early on if the IRQ is shared. 14015591b213SSam Leffler */ 1402c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1403ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14045591b213SSam Leffler return; 14055591b213SSam Leffler } 1406ef27340cSAdrian Chadd if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1407ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1408fdd758d4SSam Leffler return; 1409ef27340cSAdrian Chadd } 1410ef27340cSAdrian Chadd 141168e8e04eSSam Leffler if ((ifp->if_flags & IFF_UP) == 0 || 141268e8e04eSSam Leffler (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 141368e8e04eSSam Leffler HAL_INT status; 141468e8e04eSSam Leffler 1415c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1416c42a7b7eSSam Leffler __func__, ifp->if_flags); 14175591b213SSam Leffler ath_hal_getisr(ah, &status); /* clear ISR */ 14185591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable further intr's */ 1419ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14205591b213SSam Leffler return; 14215591b213SSam Leffler } 1422ef27340cSAdrian Chadd 1423c42a7b7eSSam Leffler /* 1424c42a7b7eSSam Leffler * Figure out the reason(s) for the interrupt. Note 1425c42a7b7eSSam Leffler * that the hal returns a pseudo-ISR that may include 1426c42a7b7eSSam Leffler * bits we haven't explicitly enabled so we mask the 1427c42a7b7eSSam Leffler * value to insure we only process bits we requested. 1428c42a7b7eSSam Leffler */ 14295591b213SSam Leffler ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1430c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1431f52d3452SAdrian Chadd CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 143231fdf3d6SAdrian Chadd #ifdef ATH_KTR_INTR_DEBUG 1433f52d3452SAdrian Chadd CTR5(ATH_KTR_INTR, 1434f52d3452SAdrian Chadd "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1435f52d3452SAdrian Chadd ah->ah_intrstate[0], 1436f52d3452SAdrian Chadd ah->ah_intrstate[1], 1437f52d3452SAdrian Chadd ah->ah_intrstate[2], 1438f52d3452SAdrian Chadd ah->ah_intrstate[3], 1439f52d3452SAdrian Chadd ah->ah_intrstate[6]); 144031fdf3d6SAdrian Chadd #endif 1441ecddff40SSam Leffler status &= sc->sc_imask; /* discard unasked for bits */ 14426f5fe81eSAdrian Chadd 14436f5fe81eSAdrian Chadd /* Short-circuit un-handled interrupts */ 1444ef27340cSAdrian Chadd if (status == 0x0) { 1445ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14466f5fe81eSAdrian Chadd return; 1447ef27340cSAdrian Chadd } 14486f5fe81eSAdrian Chadd 1449ef27340cSAdrian Chadd /* 1450ef27340cSAdrian Chadd * Take a note that we're inside the interrupt handler, so 1451ef27340cSAdrian Chadd * the reset routines know to wait. 
1452ef27340cSAdrian Chadd */ 1453ef27340cSAdrian Chadd sc->sc_intr_cnt++; 1454ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1455ef27340cSAdrian Chadd 1456ef27340cSAdrian Chadd /* 1457ef27340cSAdrian Chadd * Handle the interrupt. We won't run concurrent with the reset 1458ef27340cSAdrian Chadd * or channel change routines as they'll wait for sc_intr_cnt 1459ef27340cSAdrian Chadd * to be 0 before continuing. 1460ef27340cSAdrian Chadd */ 14615591b213SSam Leffler if (status & HAL_INT_FATAL) { 14625591b213SSam Leffler sc->sc_stats.ast_hardware++; 14635591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable intr's until reset */ 146416c8acaaSSam Leffler ath_fatal_proc(sc, 0); 14655591b213SSam Leffler } else { 1466c42a7b7eSSam Leffler if (status & HAL_INT_SWBA) { 1467c42a7b7eSSam Leffler /* 1468c42a7b7eSSam Leffler * Software beacon alert--time to send a beacon. 1469c42a7b7eSSam Leffler * Handle beacon transmission directly; deferring 1470c42a7b7eSSam Leffler * this is too slow to meet timing constraints 1471c42a7b7eSSam Leffler * under load. 1472c42a7b7eSSam Leffler */ 1473584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 147410ad9a77SSam Leffler if (sc->sc_tdma) { 147510ad9a77SSam Leffler if (sc->sc_tdmaswba == 0) { 147610ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 147710ad9a77SSam Leffler struct ieee80211vap *vap = 147810ad9a77SSam Leffler TAILQ_FIRST(&ic->ic_vaps); 147910ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); 148010ad9a77SSam Leffler sc->sc_tdmaswba = 148110ad9a77SSam Leffler vap->iv_tdma->tdma_bintval; 148210ad9a77SSam Leffler } else 148310ad9a77SSam Leffler sc->sc_tdmaswba--; 148410ad9a77SSam Leffler } else 148510ad9a77SSam Leffler #endif 1486339ccfb3SSam Leffler { 1487c42a7b7eSSam Leffler ath_beacon_proc(sc, 0); 1488339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 1489339ccfb3SSam Leffler /* 1490339ccfb3SSam Leffler * Schedule the rx taskq in case there's no 1491339ccfb3SSam Leffler * traffic so any frames held on the staging 1492339ccfb3SSam Leffler * queue are aged and potentially flushed. 1493339ccfb3SSam Leffler */ 1494339ccfb3SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1495339ccfb3SSam Leffler #endif 1496339ccfb3SSam Leffler } 1497c42a7b7eSSam Leffler } 14985591b213SSam Leffler if (status & HAL_INT_RXEOL) { 14998f939e79SAdrian Chadd int imask; 1500f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1501ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 15025591b213SSam Leffler /* 15035591b213SSam Leffler * NB: the hardware should re-read the link when 15045591b213SSam Leffler * RXE bit is written, but it doesn't work at 15055591b213SSam Leffler * least on older hardware revs. 15065591b213SSam Leffler */ 15075591b213SSam Leffler sc->sc_stats.ast_rxeol++; 150873f895fcSAdrian Chadd /* 150973f895fcSAdrian Chadd * Disable RXEOL/RXORN - prevent an interrupt 151073f895fcSAdrian Chadd * storm until the PCU logic can be reset. 15111fdadc0fSAdrian Chadd * In case the interface is reset some other 15121fdadc0fSAdrian Chadd * way before "sc_kickpcu" is called, don't 15131fdadc0fSAdrian Chadd * modify sc_imask - that way if it is reset 15141fdadc0fSAdrian Chadd * by a call to ath_reset() somehow, the 15151fdadc0fSAdrian Chadd * interrupt mask will be correctly reprogrammed. 
151673f895fcSAdrian Chadd */
15178f939e79SAdrian Chadd imask = sc->sc_imask;
15181fdadc0fSAdrian Chadd imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
15191fdadc0fSAdrian Chadd ath_hal_intrset(ah, imask);
15201fdadc0fSAdrian Chadd /*
15218f939e79SAdrian Chadd * Only blank sc_rxlink if we've not yet kicked
15228f939e79SAdrian Chadd * the PCU.
15238f939e79SAdrian Chadd *
15248f939e79SAdrian Chadd * This isn't entirely correct - the correct solution
15258f939e79SAdrian Chadd * would be to have a PCU lock and engage that for
15268f939e79SAdrian Chadd * the duration of the PCU fiddling; which would include
15278f939e79SAdrian Chadd * running the RX process. Otherwise we could end up
15288f939e79SAdrian Chadd * messing up the RX descriptor chain and making the
15298f939e79SAdrian Chadd * RX desc list much shorter.
15308f939e79SAdrian Chadd */
15318f939e79SAdrian Chadd if (! sc->sc_kickpcu)
15328f939e79SAdrian Chadd sc->sc_rxlink = NULL;
15338f939e79SAdrian Chadd sc->sc_kickpcu = 1;
15348f939e79SAdrian Chadd /*
15351fdadc0fSAdrian Chadd * Enqueue an RX proc, to handle whatever
15361fdadc0fSAdrian Chadd * is in the RX queue.
15371fdadc0fSAdrian Chadd * This will then kick the PCU.
15381fdadc0fSAdrian Chadd */
15391fdadc0fSAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1540ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
15415591b213SSam Leffler }
15425591b213SSam Leffler if (status & HAL_INT_TXURN) {
15435591b213SSam Leffler sc->sc_stats.ast_txurn++;
15445591b213SSam Leffler /* bump tx trigger level */
15455591b213SSam Leffler ath_hal_updatetxtriglevel(ah, AH_TRUE);
15465591b213SSam Leffler }
15478f939e79SAdrian Chadd if (status & HAL_INT_RX) {
15488f939e79SAdrian Chadd sc->sc_stats.ast_rx_intr++;
15490bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
15508f939e79SAdrian Chadd }
15518f939e79SAdrian Chadd if (status & HAL_INT_TX) {
15528f939e79SAdrian Chadd sc->sc_stats.ast_tx_intr++;
15538f939e79SAdrian Chadd /*
15548f939e79SAdrian Chadd * Grab all the currently set bits in the HAL txq bitmap
15558f939e79SAdrian Chadd * and blank them. This is the only place we should be
15568f939e79SAdrian Chadd * doing this.
15578f939e79SAdrian Chadd */
1558ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
15598f939e79SAdrian Chadd txqs = 0xffffffff;
15608f939e79SAdrian Chadd ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
15618f939e79SAdrian Chadd sc->sc_txq_active |= txqs;
15620bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1563ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
15648f939e79SAdrian Chadd }
15655591b213SSam Leffler if (status & HAL_INT_BMISS) {
15665591b213SSam Leffler sc->sc_stats.ast_bmiss++;
15670bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
15685591b213SSam Leffler }
15696ad02dbaSAdrian Chadd if (status & HAL_INT_GTT)
15706ad02dbaSAdrian Chadd sc->sc_stats.ast_tx_timeout++;
15715594f5c0SAdrian Chadd if (status & HAL_INT_CST)
15725594f5c0SAdrian Chadd sc->sc_stats.ast_tx_cst++;
1573c42a7b7eSSam Leffler if (status & HAL_INT_MIB) {
1574c42a7b7eSSam Leffler sc->sc_stats.ast_mib++;
1575ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
1576c42a7b7eSSam Leffler /*
1577c42a7b7eSSam Leffler * Disable interrupts until we service the MIB
1578c42a7b7eSSam Leffler * interrupt; otherwise it will continue to fire.
1579c42a7b7eSSam Leffler */
1580c42a7b7eSSam Leffler ath_hal_intrset(ah, 0);
1581c42a7b7eSSam Leffler /*
1582c42a7b7eSSam Leffler * Let the hal handle the event. We assume it will
1583c42a7b7eSSam Leffler * clear whatever condition caused the interrupt.
1584c42a7b7eSSam Leffler */ 1585ffa2cab6SSam Leffler ath_hal_mibevent(ah, &sc->sc_halstats); 15868f939e79SAdrian Chadd /* 15878f939e79SAdrian Chadd * Don't reset the interrupt if we've just 15888f939e79SAdrian Chadd * kicked the PCU, or we may get a nested 15898f939e79SAdrian Chadd * RXEOL before the rxproc has had a chance 15908f939e79SAdrian Chadd * to run. 15918f939e79SAdrian Chadd */ 15928f939e79SAdrian Chadd if (sc->sc_kickpcu == 0) 1593c42a7b7eSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1594ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1595c42a7b7eSSam Leffler } 15969c4fc1e8SSam Leffler if (status & HAL_INT_RXORN) { 15979c4fc1e8SSam Leffler /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1598f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 15999c4fc1e8SSam Leffler sc->sc_stats.ast_rxorn++; 16009c4fc1e8SSam Leffler } 16015591b213SSam Leffler } 1602ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1603ef27340cSAdrian Chadd sc->sc_intr_cnt--; 1604ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16055591b213SSam Leffler } 16065591b213SSam Leffler 16075591b213SSam Leffler static void 16085591b213SSam Leffler ath_fatal_proc(void *arg, int pending) 16095591b213SSam Leffler { 16105591b213SSam Leffler struct ath_softc *sc = arg; 1611fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 161216c8acaaSSam Leffler u_int32_t *state; 161316c8acaaSSam Leffler u_int32_t len; 161468e8e04eSSam Leffler void *sp; 16155591b213SSam Leffler 1616c42a7b7eSSam Leffler if_printf(ifp, "hardware error; resetting\n"); 161716c8acaaSSam Leffler /* 161816c8acaaSSam Leffler * Fatal errors are unrecoverable. Typically these 161916c8acaaSSam Leffler * are caused by DMA errors. Collect h/w state from 162016c8acaaSSam Leffler * the hal so we can diagnose what's going on. 162116c8acaaSSam Leffler */ 162268e8e04eSSam Leffler if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 162316c8acaaSSam Leffler KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 162468e8e04eSSam Leffler state = sp; 162516c8acaaSSam Leffler if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 162616c8acaaSSam Leffler state[0], state[1] , state[2], state[3], 162716c8acaaSSam Leffler state[4], state[5]); 162816c8acaaSSam Leffler } 1629517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 16305591b213SSam Leffler } 16315591b213SSam Leffler 16325591b213SSam Leffler static void 1633b032f27cSSam Leffler ath_bmiss_vap(struct ieee80211vap *vap) 16345591b213SSam Leffler { 163559fbb257SSam Leffler /* 163659fbb257SSam Leffler * Workaround phantom bmiss interrupts by sanity-checking 163759fbb257SSam Leffler * the time of our last rx'd frame. If it is within the 163859fbb257SSam Leffler * beacon miss interval then ignore the interrupt. If it's 163959fbb257SSam Leffler * truly a bmiss we'll get another interrupt soon and that'll 164059fbb257SSam Leffler * be dispatched up for processing. Note this applies only 164159fbb257SSam Leffler * for h/w beacon miss events. 
164259fbb257SSam Leffler */ 164359fbb257SSam Leffler if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1644a7ace843SSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 1645a7ace843SSam Leffler struct ath_softc *sc = ifp->if_softc; 1646d7736e13SSam Leffler u_int64_t lastrx = sc->sc_lastrx; 1647d7736e13SSam Leffler u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 1648d7736e13SSam Leffler u_int bmisstimeout = 1649b032f27cSSam Leffler vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1650d7736e13SSam Leffler 1651d7736e13SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 1652d7736e13SSam Leffler "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1653d7736e13SSam Leffler __func__, (unsigned long long) tsf, 1654d7736e13SSam Leffler (unsigned long long)(tsf - lastrx), 1655d7736e13SSam Leffler (unsigned long long) lastrx, bmisstimeout); 165659fbb257SSam Leffler 165759fbb257SSam Leffler if (tsf - lastrx <= bmisstimeout) { 1658d7736e13SSam Leffler sc->sc_stats.ast_bmiss_phantom++; 165959fbb257SSam Leffler return; 166059fbb257SSam Leffler } 166159fbb257SSam Leffler } 166259fbb257SSam Leffler ATH_VAP(vap)->av_bmiss(vap); 1663e585d188SSam Leffler } 1664b032f27cSSam Leffler 1665459bc4f0SSam Leffler static int 1666459bc4f0SSam Leffler ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1667459bc4f0SSam Leffler { 1668459bc4f0SSam Leffler uint32_t rsize; 1669459bc4f0SSam Leffler void *sp; 1670459bc4f0SSam Leffler 167125c96056SAdrian Chadd if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1672459bc4f0SSam Leffler return 0; 1673459bc4f0SSam Leffler KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1674459bc4f0SSam Leffler *hangs = *(uint32_t *)sp; 1675459bc4f0SSam Leffler return 1; 1676459bc4f0SSam Leffler } 1677459bc4f0SSam Leffler 1678b032f27cSSam Leffler static void 1679b032f27cSSam Leffler ath_bmiss_proc(void *arg, int pending) 1680b032f27cSSam Leffler { 1681b032f27cSSam Leffler struct ath_softc *sc = arg; 1682b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1683459bc4f0SSam Leffler uint32_t hangs; 1684b032f27cSSam Leffler 1685b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1686459bc4f0SSam Leffler 1687459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 16884fa8d4efSDaniel Eischen if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1689517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 1690459bc4f0SSam Leffler } else 1691b032f27cSSam Leffler ieee80211_beacon_miss(ifp->if_l2com); 16925591b213SSam Leffler } 16935591b213SSam Leffler 1694724c193aSSam Leffler /* 1695b032f27cSSam Leffler * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1696b032f27cSSam Leffler * calcs together with WME. If necessary disable the crypto 1697b032f27cSSam Leffler * hardware and mark the 802.11 state so keys will be setup 1698b032f27cSSam Leffler * with the MIC work done in software. 
1699b032f27cSSam Leffler */ 1700b032f27cSSam Leffler static void 1701b032f27cSSam Leffler ath_settkipmic(struct ath_softc *sc) 1702b032f27cSSam Leffler { 1703b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1704b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1705b032f27cSSam Leffler 1706b032f27cSSam Leffler if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1707b032f27cSSam Leffler if (ic->ic_flags & IEEE80211_F_WME) { 1708b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1709b032f27cSSam Leffler ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1710b032f27cSSam Leffler } else { 1711b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1712b032f27cSSam Leffler ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1713b032f27cSSam Leffler } 1714b032f27cSSam Leffler } 1715b032f27cSSam Leffler } 1716b032f27cSSam Leffler 17175591b213SSam Leffler static void 17185591b213SSam Leffler ath_init(void *arg) 17195591b213SSam Leffler { 17205591b213SSam Leffler struct ath_softc *sc = (struct ath_softc *) arg; 1721fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1722b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 17235591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 17245591b213SSam Leffler HAL_STATUS status; 17255591b213SSam Leffler 1726c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1727c42a7b7eSSam Leffler __func__, ifp->if_flags); 17285591b213SSam Leffler 1729f0b2a0beSSam Leffler ATH_LOCK(sc); 17305591b213SSam Leffler /* 17315591b213SSam Leffler * Stop anything previously setup. This is safe 17325591b213SSam Leffler * whether this is the first time through or not. 17335591b213SSam Leffler */ 1734c42a7b7eSSam Leffler ath_stop_locked(ifp); 17355591b213SSam Leffler 17365591b213SSam Leffler /* 17375591b213SSam Leffler * The basic interface to setting the hardware in a good 17385591b213SSam Leffler * state is ``reset''. On return the hardware is known to 17395591b213SSam Leffler * be powered up and with interrupts disabled. This must 17405591b213SSam Leffler * be followed by initialization of the appropriate bits 17415591b213SSam Leffler * and then setup of the interrupt mask. 17425591b213SSam Leffler */ 1743b032f27cSSam Leffler ath_settkipmic(sc); 174459efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 17455591b213SSam Leffler if_printf(ifp, "unable to reset hardware; hal status %u\n", 17465591b213SSam Leffler status); 1747b032f27cSSam Leffler ATH_UNLOCK(sc); 1748b032f27cSSam Leffler return; 17495591b213SSam Leffler } 1750b032f27cSSam Leffler ath_chan_change(sc, ic->ic_curchan); 17515591b213SSam Leffler 175248237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 175348237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 175448237774SAdrian Chadd 17555591b213SSam Leffler /* 1756c59005e9SSam Leffler * Likewise this is set during reset so update 1757c59005e9SSam Leffler * state cached in the driver. 
1758c59005e9SSam Leffler */ 1759c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 17602dc7fcc4SSam Leffler sc->sc_lastlongcal = 0; 17612dc7fcc4SSam Leffler sc->sc_resetcal = 1; 17622dc7fcc4SSam Leffler sc->sc_lastcalreset = 0; 1763a108ab63SAdrian Chadd sc->sc_lastani = 0; 1764a108ab63SAdrian Chadd sc->sc_lastshortcal = 0; 1765a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 17662fd9aabbSAdrian Chadd /* 17672fd9aabbSAdrian Chadd * Beacon timers were cleared here; give ath_newstate() 17682fd9aabbSAdrian Chadd * a hint that the beacon timers should be poked when 17692fd9aabbSAdrian Chadd * things transition to the RUN state. 17702fd9aabbSAdrian Chadd */ 17712fd9aabbSAdrian Chadd sc->sc_beacons = 0; 1772c42a7b7eSSam Leffler 1773c42a7b7eSSam Leffler /* 17748f939e79SAdrian Chadd * Initial aggregation settings. 17758f939e79SAdrian Chadd */ 17768f939e79SAdrian Chadd sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 17778f939e79SAdrian Chadd sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 17788f939e79SAdrian Chadd sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 17798f939e79SAdrian Chadd 17808f939e79SAdrian Chadd /* 17815591b213SSam Leffler * Setup the hardware after reset: the key cache 17825591b213SSam Leffler * is filled as needed and the receive engine is 17835591b213SSam Leffler * set going. Frame transmit is handled entirely 17845591b213SSam Leffler * in the frame output path; there's nothing to do 17855591b213SSam Leffler * here except setup the interrupt mask. 17865591b213SSam Leffler */ 17875591b213SSam Leffler if (ath_startrecv(sc) != 0) { 17885591b213SSam Leffler if_printf(ifp, "unable to start recv logic\n"); 1789b032f27cSSam Leffler ATH_UNLOCK(sc); 1790b032f27cSSam Leffler return; 17915591b213SSam Leffler } 17925591b213SSam Leffler 17935591b213SSam Leffler /* 17945591b213SSam Leffler * Enable interrupts. 17955591b213SSam Leffler */ 17965591b213SSam Leffler sc->sc_imask = HAL_INT_RX | HAL_INT_TX 17975591b213SSam Leffler | HAL_INT_RXEOL | HAL_INT_RXORN 17985591b213SSam Leffler | HAL_INT_FATAL | HAL_INT_GLOBAL; 1799c42a7b7eSSam Leffler /* 1800c42a7b7eSSam Leffler * Enable MIB interrupts when there are hardware phy counters. 1801c42a7b7eSSam Leffler * Note we only do this (at the moment) for station mode. 
1802c42a7b7eSSam Leffler */ 1803c42a7b7eSSam Leffler if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1804c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_MIB; 18055591b213SSam Leffler 18065594f5c0SAdrian Chadd /* Enable global TX timeout and carrier sense timeout if available */ 18076ad02dbaSAdrian Chadd if (ath_hal_gtxto_supported(ah)) 18083788ebedSAdrian Chadd sc->sc_imask |= HAL_INT_GTT; 1809d0a0ebc6SAdrian Chadd 1810d0a0ebc6SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1811d0a0ebc6SAdrian Chadd __func__, sc->sc_imask); 18126ad02dbaSAdrian Chadd 181313f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 18142e986da5SSam Leffler callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1815b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 18165591b213SSam Leffler 1817b032f27cSSam Leffler ATH_UNLOCK(sc); 1818b032f27cSSam Leffler 181986e07743SSam Leffler #ifdef ATH_TX99_DIAG 182086e07743SSam Leffler if (sc->sc_tx99 != NULL) 182186e07743SSam Leffler sc->sc_tx99->start(sc->sc_tx99); 182286e07743SSam Leffler else 182386e07743SSam Leffler #endif 1824b032f27cSSam Leffler ieee80211_start_all(ic); /* start all vap's */ 18255591b213SSam Leffler } 18265591b213SSam Leffler 18275591b213SSam Leffler static void 1828c42a7b7eSSam Leffler ath_stop_locked(struct ifnet *ifp) 18295591b213SSam Leffler { 18305591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 18315591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 18325591b213SSam Leffler 1833c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1834c42a7b7eSSam Leffler __func__, sc->sc_invalid, ifp->if_flags); 18355591b213SSam Leffler 1836c42a7b7eSSam Leffler ATH_LOCK_ASSERT(sc); 183713f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 18385591b213SSam Leffler /* 18395591b213SSam Leffler * Shutdown the hardware and driver: 1840c42a7b7eSSam Leffler * reset 802.11 state machine 18415591b213SSam Leffler * turn off timers 1842c42a7b7eSSam Leffler * disable interrupts 1843c42a7b7eSSam Leffler * turn off the radio 18445591b213SSam Leffler * clear transmit machinery 18455591b213SSam Leffler * clear receive machinery 18465591b213SSam Leffler * drain and release tx queues 18475591b213SSam Leffler * reclaim beacon resources 18485591b213SSam Leffler * power down hardware 18495591b213SSam Leffler * 18505591b213SSam Leffler * Note that some of this work is not possible if the 18515591b213SSam Leffler * hardware is gone (invalid). 
18525591b213SSam Leffler */
185386e07743SSam Leffler #ifdef ATH_TX99_DIAG
185486e07743SSam Leffler if (sc->sc_tx99 != NULL)
185586e07743SSam Leffler sc->sc_tx99->stop(sc->sc_tx99);
185686e07743SSam Leffler #endif
18572e986da5SSam Leffler callout_stop(&sc->sc_wd_ch);
18582e986da5SSam Leffler sc->sc_wd_timer = 0;
185913f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1860c42a7b7eSSam Leffler if (!sc->sc_invalid) {
18613e50ec2cSSam Leffler if (sc->sc_softled) {
18623e50ec2cSSam Leffler callout_stop(&sc->sc_ledtimer);
18633e50ec2cSSam Leffler ath_hal_gpioset(ah, sc->sc_ledpin,
18643e50ec2cSSam Leffler !sc->sc_ledon);
18653e50ec2cSSam Leffler sc->sc_blinking = 0;
18663e50ec2cSSam Leffler }
18675591b213SSam Leffler ath_hal_intrset(ah, 0);
1868c42a7b7eSSam Leffler }
1869517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT);
1870c42a7b7eSSam Leffler if (!sc->sc_invalid) {
18719a842e8bSAdrian Chadd ath_stoprecv(sc, 1);
1872c42a7b7eSSam Leffler ath_hal_phydisable(ah);
1873c42a7b7eSSam Leffler } else
18745591b213SSam Leffler sc->sc_rxlink = NULL;
1875b032f27cSSam Leffler ath_beacon_free(sc); /* XXX not needed */
1876c42a7b7eSSam Leffler }
1877c42a7b7eSSam Leffler }
1878c42a7b7eSSam Leffler 
1879ef27340cSAdrian Chadd #define MAX_TXRX_ITERATIONS 1000
1880ef27340cSAdrian Chadd static void
1881ef27340cSAdrian Chadd ath_txrx_stop(struct ath_softc *sc)
1882ef27340cSAdrian Chadd {
1883ef27340cSAdrian Chadd int i = MAX_TXRX_ITERATIONS;
1884ef27340cSAdrian Chadd 
1885ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc);
1886ef27340cSAdrian Chadd /* Stop any new TX/RX from occurring */
1887ef27340cSAdrian Chadd taskqueue_block(sc->sc_tq);
1888ef27340cSAdrian Chadd 
1889ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
1890ef27340cSAdrian Chadd /*
1891ef27340cSAdrian Chadd * Sleep until all the pending operations have completed.
1892ef27340cSAdrian Chadd *
1893ef27340cSAdrian Chadd * The caller must ensure that the reset counter has been incremented
1894ef27340cSAdrian Chadd * or the pending operations may continue being queued.
1895ef27340cSAdrian Chadd */
1896ef27340cSAdrian Chadd while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1897ef27340cSAdrian Chadd sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1898ef27340cSAdrian Chadd if (i <= 0)
1899ef27340cSAdrian Chadd break;
1900a2d8240dSAdrian Chadd msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1901ef27340cSAdrian Chadd i--;
1902ef27340cSAdrian Chadd }
1903ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
1904ef27340cSAdrian Chadd 
1905ef27340cSAdrian Chadd if (i <= 0)
1906ef27340cSAdrian Chadd device_printf(sc->sc_dev,
1907ef27340cSAdrian Chadd "%s: didn't finish after %d iterations\n",
1908ef27340cSAdrian Chadd __func__, MAX_TXRX_ITERATIONS);
1909ef27340cSAdrian Chadd }
1910ef27340cSAdrian Chadd #undef MAX_TXRX_ITERATIONS
1911ef27340cSAdrian Chadd 
1912ef27340cSAdrian Chadd static void
1913ef27340cSAdrian Chadd ath_txrx_start(struct ath_softc *sc)
1914ef27340cSAdrian Chadd {
1915ef27340cSAdrian Chadd 
1916ef27340cSAdrian Chadd taskqueue_unblock(sc->sc_tq);
1917ef27340cSAdrian Chadd }
1918ef27340cSAdrian Chadd 
1919ee321975SAdrian Chadd /*
1920ee321975SAdrian Chadd * Grab the reset lock, and wait around until no one else
1921ee321975SAdrian Chadd * is trying to do anything with it.
1922ee321975SAdrian Chadd *
1923ee321975SAdrian Chadd * This is totally horrible but we can't hold this lock for
1924ee321975SAdrian Chadd * long enough to do TX/RX or we end up with net80211/ip stack
1925ee321975SAdrian Chadd * LORs and eventual deadlock.
1926ee321975SAdrian Chadd * 1927ee321975SAdrian Chadd * "dowait" signals whether to spin, waiting for the reset 1928ee321975SAdrian Chadd * lock count to reach 0. This should (for now) only be used 1929ee321975SAdrian Chadd * during the reset path, as the rest of the code may not 1930ee321975SAdrian Chadd * be locking-reentrant enough to behave correctly. 1931ee321975SAdrian Chadd * 1932ee321975SAdrian Chadd * Another, cleaner way should be found to serialise all of 1933ee321975SAdrian Chadd * these operations. 1934ee321975SAdrian Chadd */ 1935ee321975SAdrian Chadd #define MAX_RESET_ITERATIONS 10 1936ee321975SAdrian Chadd static int 1937ee321975SAdrian Chadd ath_reset_grablock(struct ath_softc *sc, int dowait) 1938ee321975SAdrian Chadd { 1939ee321975SAdrian Chadd int w = 0; 1940ee321975SAdrian Chadd int i = MAX_RESET_ITERATIONS; 1941ee321975SAdrian Chadd 1942ee321975SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 1943ee321975SAdrian Chadd do { 1944ee321975SAdrian Chadd if (sc->sc_inreset_cnt == 0) { 1945ee321975SAdrian Chadd w = 1; 1946ee321975SAdrian Chadd break; 1947ee321975SAdrian Chadd } 1948ee321975SAdrian Chadd if (dowait == 0) { 1949ee321975SAdrian Chadd w = 0; 1950ee321975SAdrian Chadd break; 1951ee321975SAdrian Chadd } 1952ee321975SAdrian Chadd ATH_PCU_UNLOCK(sc); 1953ee321975SAdrian Chadd pause("ath_reset_grablock", 1); 1954ee321975SAdrian Chadd i--; 1955ee321975SAdrian Chadd ATH_PCU_LOCK(sc); 1956ee321975SAdrian Chadd } while (i > 0); 1957ee321975SAdrian Chadd 1958ee321975SAdrian Chadd /* 1959ee321975SAdrian Chadd * We always increment the refcounter, regardless 1960ee321975SAdrian Chadd * of whether we succeeded to get it in an exclusive 1961ee321975SAdrian Chadd * way. 1962ee321975SAdrian Chadd */ 1963ee321975SAdrian Chadd sc->sc_inreset_cnt++; 1964ee321975SAdrian Chadd 1965ee321975SAdrian Chadd if (i <= 0) 1966ee321975SAdrian Chadd device_printf(sc->sc_dev, 1967ee321975SAdrian Chadd "%s: didn't finish after %d iterations\n", 1968ee321975SAdrian Chadd __func__, MAX_RESET_ITERATIONS); 1969ee321975SAdrian Chadd 1970ee321975SAdrian Chadd if (w == 0) 1971ee321975SAdrian Chadd device_printf(sc->sc_dev, 1972ee321975SAdrian Chadd "%s: warning, recursive reset path!\n", 1973ee321975SAdrian Chadd __func__); 1974ee321975SAdrian Chadd 1975ee321975SAdrian Chadd return w; 1976ee321975SAdrian Chadd } 1977ee321975SAdrian Chadd #undef MAX_RESET_ITERATIONS 1978ee321975SAdrian Chadd 1979ee321975SAdrian Chadd /* 1980ee321975SAdrian Chadd * XXX TODO: write ath_reset_releaselock 1981ee321975SAdrian Chadd */ 1982ee321975SAdrian Chadd 1983c42a7b7eSSam Leffler static void 1984c42a7b7eSSam Leffler ath_stop(struct ifnet *ifp) 1985c42a7b7eSSam Leffler { 1986c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 1987c42a7b7eSSam Leffler 1988c42a7b7eSSam Leffler ATH_LOCK(sc); 1989c42a7b7eSSam Leffler ath_stop_locked(ifp); 1990f0b2a0beSSam Leffler ATH_UNLOCK(sc); 19915591b213SSam Leffler } 19925591b213SSam Leffler 19935591b213SSam Leffler /* 19945591b213SSam Leffler * Reset the hardware w/o losing operational state. This is 19955591b213SSam Leffler * basically a more efficient way of doing ath_stop, ath_init, 19965591b213SSam Leffler * followed by state transitions to the current 802.11 1997c42a7b7eSSam Leffler * operational state. Used to recover from various errors and 1998c42a7b7eSSam Leffler * to reset or reload hardware state. 
19995591b213SSam Leffler */
20006079fdbeSAdrian Chadd int
2001517526efSAdrian Chadd ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
20025591b213SSam Leffler {
2003c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc;
2004b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com;
20055591b213SSam Leffler struct ath_hal *ah = sc->sc_ah;
20065591b213SSam Leffler HAL_STATUS status;
2007ef27340cSAdrian Chadd int i;
20085591b213SSam Leffler 
2009f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
201016d4de92SAdrian Chadd 
2011ee321975SAdrian Chadd /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2012ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc);
2013ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc);
2014ef27340cSAdrian Chadd 
2015ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
2016ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) {
2017ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2018ef27340cSAdrian Chadd __func__);
2019ef27340cSAdrian Chadd }
20205591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */
2021ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
2022ef27340cSAdrian Chadd 
2023f52d3452SAdrian Chadd /*
20249a842e8bSAdrian Chadd * Should now wait for pending TX/RX to complete
20259a842e8bSAdrian Chadd * and block future ones from occurring. This needs to be
20269a842e8bSAdrian Chadd * done before the TX queue is drained.
2027f52d3452SAdrian Chadd */
2028ef27340cSAdrian Chadd ath_txrx_stop(sc);
2029ef27340cSAdrian Chadd ath_draintxq(sc, reset_type); /* stop xmit side */
2030ef27340cSAdrian Chadd 
2031ef27340cSAdrian Chadd /*
2032ef27340cSAdrian Chadd * Regardless of whether we're doing a no-loss flush or
2033ef27340cSAdrian Chadd * not, stop the PCU and handle what's in the RX queue.
2034ef27340cSAdrian Chadd * That way frames that shouldn't be dropped aren't.
2035ef27340cSAdrian Chadd */
20369a842e8bSAdrian Chadd ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2037ef27340cSAdrian Chadd ath_rx_proc(sc, 0);
2038ef27340cSAdrian Chadd 
2039b032f27cSSam Leffler ath_settkipmic(sc); /* configure TKIP MIC handling */
20405591b213SSam Leffler /* NB: indicate channel change so we do a full reset */
204159efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
20425591b213SSam Leffler if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
20435591b213SSam Leffler __func__, status);
2044c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah);
204548237774SAdrian Chadd 
204648237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */
204748237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan);
204848237774SAdrian Chadd 
204968e8e04eSSam Leffler if (ath_startrecv(sc) != 0) /* restart recv */
205068e8e04eSSam Leffler if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2051c42a7b7eSSam Leffler /*
2052c42a7b7eSSam Leffler * We may be doing a reset in response to an ioctl
2053c42a7b7eSSam Leffler * that changes the channel so update any state that
2054c42a7b7eSSam Leffler * might change as a result.
2055c42a7b7eSSam Leffler */ 2056724c193aSSam Leffler ath_chan_change(sc, ic->ic_curchan); 2057c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 2058584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 205910ad9a77SSam Leffler if (sc->sc_tdma) 206010ad9a77SSam Leffler ath_tdma_config(sc, NULL); 206110ad9a77SSam Leffler else 206210ad9a77SSam Leffler #endif 2063c89b957aSSam Leffler ath_beacon_config(sc, NULL); 206410ad9a77SSam Leffler } 2065c42a7b7eSSam Leffler 2066ef27340cSAdrian Chadd /* 2067ef27340cSAdrian Chadd * Release the reset lock and re-enable interrupts here. 2068ef27340cSAdrian Chadd * If an interrupt was being processed in ath_intr(), 2069ef27340cSAdrian Chadd * it would disable interrupts at this point. So we have 2070ef27340cSAdrian Chadd * to atomically enable interrupts and decrement the 2071ef27340cSAdrian Chadd * reset counter - this way ath_intr() doesn't end up 2072ef27340cSAdrian Chadd * disabling interrupts without a corresponding enable 2073ef27340cSAdrian Chadd * in the rest or channel change path. 2074ef27340cSAdrian Chadd */ 2075ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2076ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 2077ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 2078ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 2079ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2080ef27340cSAdrian Chadd 2081ef27340cSAdrian Chadd /* 2082ef27340cSAdrian Chadd * TX and RX can be started here. If it were started with 2083ef27340cSAdrian Chadd * sc_inreset_cnt > 0, the TX and RX path would abort. 2084ef27340cSAdrian Chadd * Thus if this is a nested call through the reset or 2085ef27340cSAdrian Chadd * channel change code, TX completion will occur but 2086ef27340cSAdrian Chadd * RX completion and ath_start / ath_tx_start will not 2087ef27340cSAdrian Chadd * run. 2088ef27340cSAdrian Chadd */ 2089ef27340cSAdrian Chadd 2090ef27340cSAdrian Chadd /* Restart TX/RX as needed */ 2091ef27340cSAdrian Chadd ath_txrx_start(sc); 2092ef27340cSAdrian Chadd 2093ef27340cSAdrian Chadd /* XXX Restart TX completion and pending TX */ 2094ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) { 2095ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2096ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 2097ef27340cSAdrian Chadd ATH_TXQ_LOCK(&sc->sc_txq[i]); 2098ef27340cSAdrian Chadd ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2099ef27340cSAdrian Chadd ath_txq_sched(sc, &sc->sc_txq[i]); 2100ef27340cSAdrian Chadd ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2101ef27340cSAdrian Chadd } 2102ef27340cSAdrian Chadd } 2103ef27340cSAdrian Chadd } 2104ef27340cSAdrian Chadd 2105ef27340cSAdrian Chadd /* 2106ef27340cSAdrian Chadd * This may have been set during an ath_start() call which 2107ef27340cSAdrian Chadd * set this once it detected a concurrent TX was going on. 2108ef27340cSAdrian Chadd * So, clear it. 2109ef27340cSAdrian Chadd */ 2110ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 2111ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2112ef27340cSAdrian Chadd 2113ef27340cSAdrian Chadd /* Handle any frames in the TX queue */ 2114ef27340cSAdrian Chadd /* 2115ef27340cSAdrian Chadd * XXX should this be done by the caller, rather than 2116ef27340cSAdrian Chadd * ath_reset() ? 
2117ef27340cSAdrian Chadd */ 2118c42a7b7eSSam Leffler ath_start(ifp); /* restart xmit */ 2119c42a7b7eSSam Leffler return 0; 21205591b213SSam Leffler } 21215591b213SSam Leffler 212268e8e04eSSam Leffler static int 2123b032f27cSSam Leffler ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2124b032f27cSSam Leffler { 21254b54a231SSam Leffler struct ieee80211com *ic = vap->iv_ic; 21264b54a231SSam Leffler struct ifnet *ifp = ic->ic_ifp; 21274b54a231SSam Leffler struct ath_softc *sc = ifp->if_softc; 21284b54a231SSam Leffler struct ath_hal *ah = sc->sc_ah; 21294b54a231SSam Leffler 21304b54a231SSam Leffler switch (cmd) { 21314b54a231SSam Leffler case IEEE80211_IOC_TXPOWER: 21324b54a231SSam Leffler /* 21334b54a231SSam Leffler * If per-packet TPC is enabled, then we have nothing 21344b54a231SSam Leffler * to do; otherwise we need to force the global limit. 21354b54a231SSam Leffler * All this can happen directly; no need to reset. 21364b54a231SSam Leffler */ 21374b54a231SSam Leffler if (!ath_hal_gettpc(ah)) 21384b54a231SSam Leffler ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 21394b54a231SSam Leffler return 0; 21404b54a231SSam Leffler } 2141517526efSAdrian Chadd /* XXX? Full or NOLOSS? */ 2142517526efSAdrian Chadd return ath_reset(ifp, ATH_RESET_FULL); 2143b032f27cSSam Leffler } 2144b032f27cSSam Leffler 2145b8e788a5SAdrian Chadd struct ath_buf * 214610ad9a77SSam Leffler _ath_getbuf_locked(struct ath_softc *sc) 214710ad9a77SSam Leffler { 214810ad9a77SSam Leffler struct ath_buf *bf; 214910ad9a77SSam Leffler 215010ad9a77SSam Leffler ATH_TXBUF_LOCK_ASSERT(sc); 215110ad9a77SSam Leffler 21526b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_txbuf); 2153e346b073SAdrian Chadd if (bf == NULL) { 2154e346b073SAdrian Chadd sc->sc_stats.ast_tx_getnobuf++; 2155e346b073SAdrian Chadd } else { 2156e346b073SAdrian Chadd if (bf->bf_flags & ATH_BUF_BUSY) { 2157e346b073SAdrian Chadd sc->sc_stats.ast_tx_getbusybuf++; 2158e346b073SAdrian Chadd bf = NULL; 2159e346b073SAdrian Chadd } 2160e346b073SAdrian Chadd } 2161e346b073SAdrian Chadd 216210ad9a77SSam Leffler if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) 21636b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 216410ad9a77SSam Leffler else 216510ad9a77SSam Leffler bf = NULL; 2166e346b073SAdrian Chadd 216710ad9a77SSam Leffler if (bf == NULL) { 216810ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 21696b349e5aSAdrian Chadd TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 217010ad9a77SSam Leffler "out of xmit buffers" : "xmit buffer busy"); 2171e346b073SAdrian Chadd return NULL; 217210ad9a77SSam Leffler } 2173e346b073SAdrian Chadd 2174e346b073SAdrian Chadd /* Valid bf here; clear some basic fields */ 2175e346b073SAdrian Chadd bf->bf_next = NULL; /* XXX just to be sure */ 2176e346b073SAdrian Chadd bf->bf_last = NULL; /* XXX again, just to be sure */ 2177e346b073SAdrian Chadd bf->bf_comp = NULL; /* XXX again, just to be sure */ 2178e346b073SAdrian Chadd bzero(&bf->bf_state, sizeof(bf->bf_state)); 2179e346b073SAdrian Chadd 218010ad9a77SSam Leffler return bf; 218110ad9a77SSam Leffler } 218210ad9a77SSam Leffler 2183e346b073SAdrian Chadd /* 2184e346b073SAdrian Chadd * When retrying a software frame, buffers marked ATH_BUF_BUSY 2185e346b073SAdrian Chadd * can't be thrown back on the queue as they could still be 2186e346b073SAdrian Chadd * in use by the hardware. 2187e346b073SAdrian Chadd * 2188e346b073SAdrian Chadd * This duplicates the buffer, or returns NULL. 
2189e346b073SAdrian Chadd * 2190e346b073SAdrian Chadd * The descriptor is also copied but the link pointers and 2191e346b073SAdrian Chadd * the DMA segments aren't copied; this frame should thus 2192e346b073SAdrian Chadd * be again passed through the descriptor setup/chain routines 2193e346b073SAdrian Chadd * so the link is correct. 2194e346b073SAdrian Chadd * 2195e346b073SAdrian Chadd * The caller must free the buffer using ath_freebuf(). 2196e346b073SAdrian Chadd * 2197e346b073SAdrian Chadd * XXX TODO: this call shouldn't fail as it'll cause packet loss 2198e346b073SAdrian Chadd * XXX in the TX pathway when retries are needed. 2199e346b073SAdrian Chadd * XXX Figure out how to keep some buffers free, or factor the 2200e346b073SAdrian Chadd * XXX number of busy buffers into the xmit path (ath_start()) 2201e346b073SAdrian Chadd * XXX so we don't over-commit. 2202e346b073SAdrian Chadd */ 2203e346b073SAdrian Chadd struct ath_buf * 2204e346b073SAdrian Chadd ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2205e346b073SAdrian Chadd { 2206e346b073SAdrian Chadd struct ath_buf *tbf; 2207e346b073SAdrian Chadd 2208e346b073SAdrian Chadd tbf = ath_getbuf(sc); 2209e346b073SAdrian Chadd if (tbf == NULL) 2210e346b073SAdrian Chadd return NULL; /* XXX failure? Why? */ 2211e346b073SAdrian Chadd 2212e346b073SAdrian Chadd /* Copy basics */ 2213e346b073SAdrian Chadd tbf->bf_next = NULL; 2214e346b073SAdrian Chadd tbf->bf_nseg = bf->bf_nseg; 2215e346b073SAdrian Chadd tbf->bf_txflags = bf->bf_txflags; 2216e346b073SAdrian Chadd tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2217e346b073SAdrian Chadd tbf->bf_status = bf->bf_status; 2218e346b073SAdrian Chadd tbf->bf_m = bf->bf_m; 2219e346b073SAdrian Chadd tbf->bf_node = bf->bf_node; 2220e346b073SAdrian Chadd /* will be setup by the chain/setup function */ 2221e346b073SAdrian Chadd tbf->bf_lastds = NULL; 2222e346b073SAdrian Chadd /* for now, last == self */ 2223e346b073SAdrian Chadd tbf->bf_last = tbf; 2224e346b073SAdrian Chadd tbf->bf_comp = bf->bf_comp; 2225e346b073SAdrian Chadd 2226e346b073SAdrian Chadd /* NOTE: DMA segments will be setup by the setup/chain functions */ 2227e346b073SAdrian Chadd 2228e346b073SAdrian Chadd /* The caller has to re-init the descriptor + links */ 2229e346b073SAdrian Chadd 2230e346b073SAdrian Chadd /* Copy state */ 2231e346b073SAdrian Chadd memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2232e346b073SAdrian Chadd 2233e346b073SAdrian Chadd return tbf; 2234e346b073SAdrian Chadd } 2235e346b073SAdrian Chadd 2236b8e788a5SAdrian Chadd struct ath_buf * 223710ad9a77SSam Leffler ath_getbuf(struct ath_softc *sc) 223810ad9a77SSam Leffler { 223910ad9a77SSam Leffler struct ath_buf *bf; 224010ad9a77SSam Leffler 224110ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 224210ad9a77SSam Leffler bf = _ath_getbuf_locked(sc); 224310ad9a77SSam Leffler if (bf == NULL) { 224410ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 224510ad9a77SSam Leffler 224610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 224710ad9a77SSam Leffler sc->sc_stats.ast_tx_qstop++; 2248ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? 
*/ 224910ad9a77SSam Leffler ifp->if_drv_flags |= IFF_DRV_OACTIVE; 225010ad9a77SSam Leffler } 225110ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 225210ad9a77SSam Leffler return bf; 225310ad9a77SSam Leffler } 225410ad9a77SSam Leffler 22555591b213SSam Leffler static void 22565591b213SSam Leffler ath_start(struct ifnet *ifp) 22575591b213SSam Leffler { 22585591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 22595591b213SSam Leffler struct ieee80211_node *ni; 22605591b213SSam Leffler struct ath_buf *bf; 226168e8e04eSSam Leffler struct mbuf *m, *next; 226268e8e04eSSam Leffler ath_bufhead frags; 22635591b213SSam Leffler 226413f4c340SRobert Watson if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 22655591b213SSam Leffler return; 2266ef27340cSAdrian Chadd 2267ef27340cSAdrian Chadd /* XXX is it ok to hold the ATH_LOCK here? */ 2268ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2269ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 2270ef27340cSAdrian Chadd device_printf(sc->sc_dev, 2271ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2272ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 2273ef27340cSAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2274ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2275ef27340cSAdrian Chadd return; 2276ef27340cSAdrian Chadd } 2277ef27340cSAdrian Chadd sc->sc_txstart_cnt++; 2278ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2279ef27340cSAdrian Chadd 22805591b213SSam Leffler for (;;) { 22815591b213SSam Leffler /* 22825591b213SSam Leffler * Grab a TX buffer and associated resources. 22835591b213SSam Leffler */ 228410ad9a77SSam Leffler bf = ath_getbuf(sc); 228510ad9a77SSam Leffler if (bf == NULL) 22865591b213SSam Leffler break; 22872b9411e2SSam Leffler 2288b032f27cSSam Leffler IFQ_DEQUEUE(&ifp->if_snd, m); 2289b032f27cSSam Leffler if (m == NULL) { 2290b032f27cSSam Leffler ATH_TXBUF_LOCK(sc); 22916b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2292b032f27cSSam Leffler ATH_TXBUF_UNLOCK(sc); 2293b032f27cSSam Leffler break; 2294b032f27cSSam Leffler } 2295b032f27cSSam Leffler ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 229668e8e04eSSam Leffler /* 229768e8e04eSSam Leffler * Check for fragmentation. If this frame 229868e8e04eSSam Leffler * has been broken up verify we have enough 229968e8e04eSSam Leffler * buffers to send all the fragments so all 230068e8e04eSSam Leffler * go out or none... 230168e8e04eSSam Leffler */ 23026b349e5aSAdrian Chadd TAILQ_INIT(&frags); 230368e8e04eSSam Leffler if ((m->m_flags & M_FRAG) && 230468e8e04eSSam Leffler !ath_txfrag_setup(sc, &frags, m, ni)) { 230568e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 230668e8e04eSSam Leffler "%s: out of txfrag buffers\n", __func__); 230736c6be9aSSam Leffler sc->sc_stats.ast_tx_nofrag++; 23089cb93076SSam Leffler ifp->if_oerrors++; 230968e8e04eSSam Leffler ath_freetx(m); 231068e8e04eSSam Leffler goto bad; 231168e8e04eSSam Leffler } 2312339ccfb3SSam Leffler ifp->if_opackets++; 231368e8e04eSSam Leffler nextfrag: 231468e8e04eSSam Leffler /* 231568e8e04eSSam Leffler * Pass the frame to the h/w for transmission. 231668e8e04eSSam Leffler * Fragmented frames have each frag chained together 231768e8e04eSSam Leffler * with m_nextpkt. We know there are sufficient ath_buf's 231868e8e04eSSam Leffler * to send all the frags because of work done by 231968e8e04eSSam Leffler * ath_txfrag_setup. 
We leave m_nextpkt set while 232068e8e04eSSam Leffler * calling ath_tx_start so it can use it to extend the 232168e8e04eSSam Leffler * the tx duration to cover the subsequent frag and 232268e8e04eSSam Leffler * so it can reclaim all the mbufs in case of an error; 232368e8e04eSSam Leffler * ath_tx_start clears m_nextpkt once it commits to 232468e8e04eSSam Leffler * handing the frame to the hardware. 232568e8e04eSSam Leffler */ 232668e8e04eSSam Leffler next = m->m_nextpkt; 23275591b213SSam Leffler if (ath_tx_start(sc, ni, bf, m)) { 23285591b213SSam Leffler bad: 23295591b213SSam Leffler ifp->if_oerrors++; 2330c42a7b7eSSam Leffler reclaim: 233168e8e04eSSam Leffler bf->bf_m = NULL; 233268e8e04eSSam Leffler bf->bf_node = NULL; 2333c42a7b7eSSam Leffler ATH_TXBUF_LOCK(sc); 23346b349e5aSAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 233568e8e04eSSam Leffler ath_txfrag_cleanup(sc, &frags, ni); 2336c42a7b7eSSam Leffler ATH_TXBUF_UNLOCK(sc); 2337c42a7b7eSSam Leffler if (ni != NULL) 2338c42a7b7eSSam Leffler ieee80211_free_node(ni); 23395591b213SSam Leffler continue; 23405591b213SSam Leffler } 234168e8e04eSSam Leffler if (next != NULL) { 234268e8e04eSSam Leffler /* 234368e8e04eSSam Leffler * Beware of state changing between frags. 234468e8e04eSSam Leffler * XXX check sta power-save state? 234568e8e04eSSam Leffler */ 2346b032f27cSSam Leffler if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 234768e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 234868e8e04eSSam Leffler "%s: flush fragmented packet, state %s\n", 234968e8e04eSSam Leffler __func__, 2350b032f27cSSam Leffler ieee80211_state_name[ni->ni_vap->iv_state]); 235168e8e04eSSam Leffler ath_freetx(next); 235268e8e04eSSam Leffler goto reclaim; 235368e8e04eSSam Leffler } 235468e8e04eSSam Leffler m = next; 23556b349e5aSAdrian Chadd bf = TAILQ_FIRST(&frags); 235668e8e04eSSam Leffler KASSERT(bf != NULL, ("no buf for txfrag")); 23576b349e5aSAdrian Chadd TAILQ_REMOVE(&frags, bf, bf_list); 235868e8e04eSSam Leffler goto nextfrag; 235968e8e04eSSam Leffler } 23605591b213SSam Leffler 23612e986da5SSam Leffler sc->sc_wd_timer = 5; 23625591b213SSam Leffler } 2363ef27340cSAdrian Chadd 2364ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2365ef27340cSAdrian Chadd sc->sc_txstart_cnt--; 2366ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 23675591b213SSam Leffler } 23685591b213SSam Leffler 23695591b213SSam Leffler static int 23705591b213SSam Leffler ath_media_change(struct ifnet *ifp) 23715591b213SSam Leffler { 2372b032f27cSSam Leffler int error = ieee80211_media_change(ifp); 2373b032f27cSSam Leffler /* NB: only the fixed rate can change and that doesn't need a reset */ 2374b032f27cSSam Leffler return (error == ENETRESET ? 0 : error); 23755591b213SSam Leffler } 23765591b213SSam Leffler 2377c42a7b7eSSam Leffler /* 2378c42a7b7eSSam Leffler * Block/unblock tx+rx processing while a key change is done. 2379c42a7b7eSSam Leffler * We assume the caller serializes key management operations 2380c42a7b7eSSam Leffler * so we only need to worry about synchronization with other 2381c42a7b7eSSam Leffler * uses that originate in the driver. 
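 * Blocking is done by pausing the driver taskqueue and holding the
 * ifnet send queue lock; management frames are not held off.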
2382c42a7b7eSSam Leffler */ 2383c42a7b7eSSam Leffler static void 2384b032f27cSSam Leffler ath_key_update_begin(struct ieee80211vap *vap) 2385c42a7b7eSSam Leffler { 2386b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2387c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2388c42a7b7eSSam Leffler 2389c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2390b032f27cSSam Leffler taskqueue_block(sc->sc_tq); 2391c42a7b7eSSam Leffler IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2392c42a7b7eSSam Leffler } 2393c42a7b7eSSam Leffler 2394c42a7b7eSSam Leffler static void 2395b032f27cSSam Leffler ath_key_update_end(struct ieee80211vap *vap) 2396c42a7b7eSSam Leffler { 2397b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2398c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2399c42a7b7eSSam Leffler 2400c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2401c42a7b7eSSam Leffler IF_UNLOCK(&ifp->if_snd); 2402b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 2403c42a7b7eSSam Leffler } 24045591b213SSam Leffler 24054bc0e754SSam Leffler /* 24064bc0e754SSam Leffler * Calculate the receive filter according to the 24074bc0e754SSam Leffler * operating mode and state: 24084bc0e754SSam Leffler * 24094bc0e754SSam Leffler * o always accept unicast, broadcast, and multicast traffic 2410b032f27cSSam Leffler * o accept PHY error frames when hardware doesn't have MIB support 2411411373ebSSam Leffler * to count and we need them for ANI (sta mode only until recently) 2412b032f27cSSam Leffler * and we are not scanning (ANI is disabled) 2413411373ebSSam Leffler * NB: older hal's add rx filter bits out of sight and we need to 2414411373ebSSam Leffler * blindly preserve them 24154bc0e754SSam Leffler * o probe request frames are accepted only when operating in 241659aa14a9SRui Paulo * hostap, adhoc, mesh, or monitor modes 2417b032f27cSSam Leffler * o enable promiscuous mode 2418b032f27cSSam Leffler * - when in monitor mode 2419b032f27cSSam Leffler * - if interface marked PROMISC (assumes bridge setting is filtered) 24204bc0e754SSam Leffler * o accept beacons: 24214bc0e754SSam Leffler * - when operating in station mode for collecting rssi data when 24224bc0e754SSam Leffler * the station is otherwise quiet, or 2423b032f27cSSam Leffler * - when operating in adhoc mode so the 802.11 layer creates 2424b032f27cSSam Leffler * node table entries for peers, 24254bc0e754SSam Leffler * - when scanning 2426b032f27cSSam Leffler * - when doing s/w beacon miss (e.g. 
for ap+sta) 2427b032f27cSSam Leffler * - when operating in ap mode in 11g to detect overlapping bss that 2428b032f27cSSam Leffler * require protection 242959aa14a9SRui Paulo * - when operating in mesh mode to detect neighbors 24306f48c956SSam Leffler * o accept control frames: 24316f48c956SSam Leffler * - when in monitor mode 2432b032f27cSSam Leffler * XXX HT protection for 11n 24334bc0e754SSam Leffler */ 24344bc0e754SSam Leffler static u_int32_t 243568e8e04eSSam Leffler ath_calcrxfilter(struct ath_softc *sc) 24364bc0e754SSam Leffler { 2437fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2438b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 24394bc0e754SSam Leffler u_int32_t rfilt; 24404bc0e754SSam Leffler 2441b032f27cSSam Leffler rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 2442411373ebSSam Leffler if (!sc->sc_needmib && !sc->sc_scanning) 2443411373ebSSam Leffler rfilt |= HAL_RX_FILTER_PHYERR; 24444bc0e754SSam Leffler if (ic->ic_opmode != IEEE80211_M_STA) 24454bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROBEREQ; 24465463c4a4SSam Leffler /* XXX ic->ic_monvaps != 0? */ 2447b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) 24484bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_PROM; 24494bc0e754SSam Leffler if (ic->ic_opmode == IEEE80211_M_STA || 245047db982fSSam Leffler ic->ic_opmode == IEEE80211_M_IBSS || 2451b032f27cSSam Leffler sc->sc_swbmiss || sc->sc_scanning) 2452b032f27cSSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2453b032f27cSSam Leffler /* 2454b032f27cSSam Leffler * NB: We don't recalculate the rx filter when 2455b032f27cSSam Leffler * ic_protmode changes; otherwise we could do 2456b032f27cSSam Leffler * this only when ic_protmode != NONE. 2457b032f27cSSam Leffler */ 2458b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_HOSTAP && 2459b032f27cSSam Leffler IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) 24604bc0e754SSam Leffler rfilt |= HAL_RX_FILTER_BEACON; 2461f378d4c8SAdrian Chadd 2462f378d4c8SAdrian Chadd /* 24634aa18e9dSAdrian Chadd * Enable hardware PS-POLL RX only for hostap mode; 2464f378d4c8SAdrian Chadd * STA mode sends PS-POLL frames but never 24654aa18e9dSAdrian Chadd * receives them. 2466f378d4c8SAdrian Chadd */ 2467dce0bccaSAdrian Chadd if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 2468f378d4c8SAdrian Chadd 0, NULL) == HAL_OK && 2469f378d4c8SAdrian Chadd ic->ic_opmode == IEEE80211_M_HOSTAP) 2470f378d4c8SAdrian Chadd rfilt |= HAL_RX_FILTER_PSPOLL; 2471f378d4c8SAdrian Chadd 2472fe0dd789SSam Leffler if (sc->sc_nmeshvaps) { 247359aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BEACON; 247459aa14a9SRui Paulo if (sc->sc_hasbmatch) 247559aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_BSSID; 247659aa14a9SRui Paulo else 247759aa14a9SRui Paulo rfilt |= HAL_RX_FILTER_PROM; 247859aa14a9SRui Paulo } 24796f48c956SSam Leffler if (ic->ic_opmode == IEEE80211_M_MONITOR) 24806f48c956SSam Leffler rfilt |= HAL_RX_FILTER_CONTROL; 2481f378d4c8SAdrian Chadd 2482f378d4c8SAdrian Chadd /* 2483f378d4c8SAdrian Chadd * Enable RX of compressed BAR frames only when doing 2484f378d4c8SAdrian Chadd * 802.11n. Required for A-MPDU. 2485f378d4c8SAdrian Chadd */ 2486a83df4d3SAdrian Chadd if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) 2487a83df4d3SAdrian Chadd rfilt |= HAL_RX_FILTER_COMPBAR; 2488f378d4c8SAdrian Chadd 2489fad901ebSAdrian Chadd /* 2490fad901ebSAdrian Chadd * Enable radar PHY errors if requested by the 2491fad901ebSAdrian Chadd * DFS module. 
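 * (The request is conveyed via the sc_dodfs flag checked below.)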
2492fad901ebSAdrian Chadd */ 2493fad901ebSAdrian Chadd if (sc->sc_dodfs) 2494fad901ebSAdrian Chadd rfilt |= HAL_RX_FILTER_PHYRADAR; 2495fad901ebSAdrian Chadd 2496b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", 2497b032f27cSSam Leffler __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); 24984bc0e754SSam Leffler return rfilt; 2499b032f27cSSam Leffler } 2500b032f27cSSam Leffler 2501b032f27cSSam Leffler static void 2502b032f27cSSam Leffler ath_update_promisc(struct ifnet *ifp) 2503b032f27cSSam Leffler { 2504b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2505b032f27cSSam Leffler u_int32_t rfilt; 2506b032f27cSSam Leffler 2507b032f27cSSam Leffler /* configure rx filter */ 2508b032f27cSSam Leffler rfilt = ath_calcrxfilter(sc); 2509b032f27cSSam Leffler ath_hal_setrxfilter(sc->sc_ah, rfilt); 2510b032f27cSSam Leffler 2511b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2512b032f27cSSam Leffler } 2513b032f27cSSam Leffler 2514b032f27cSSam Leffler static void 2515b032f27cSSam Leffler ath_update_mcast(struct ifnet *ifp) 2516b032f27cSSam Leffler { 2517b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2518b032f27cSSam Leffler u_int32_t mfilt[2]; 2519b032f27cSSam Leffler 2520b032f27cSSam Leffler /* calculate and install multicast filter */ 2521b032f27cSSam Leffler if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2522b032f27cSSam Leffler struct ifmultiaddr *ifma; 2523b032f27cSSam Leffler /* 2524b032f27cSSam Leffler * Merge multicast addresses to form the hardware filter. 2525b032f27cSSam Leffler */ 2526b032f27cSSam Leffler mfilt[0] = mfilt[1] = 0; 2527eb956cd0SRobert Watson if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2528b032f27cSSam Leffler TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2529b032f27cSSam Leffler caddr_t dl; 2530b032f27cSSam Leffler u_int32_t val; 2531b032f27cSSam Leffler u_int8_t pos; 2532b032f27cSSam Leffler 2533b032f27cSSam Leffler /* calculate XOR of eight 6bit values */ 2534b032f27cSSam Leffler dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2535b032f27cSSam Leffler val = LE_READ_4(dl + 0); 2536b032f27cSSam Leffler pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2537b032f27cSSam Leffler val = LE_READ_4(dl + 3); 2538b032f27cSSam Leffler pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2539b032f27cSSam Leffler pos &= 0x3f; 2540b032f27cSSam Leffler mfilt[pos / 32] |= (1 << (pos % 32)); 2541b032f27cSSam Leffler } 2542eb956cd0SRobert Watson if_maddr_runlock(ifp); 2543b032f27cSSam Leffler } else 2544b032f27cSSam Leffler mfilt[0] = mfilt[1] = ~0; 2545b032f27cSSam Leffler ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2546b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2547b032f27cSSam Leffler __func__, mfilt[0], mfilt[1]); 25484bc0e754SSam Leffler } 25494bc0e754SSam Leffler 25505591b213SSam Leffler static void 25515591b213SSam Leffler ath_mode_init(struct ath_softc *sc) 25525591b213SSam Leffler { 2553fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2554b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 2555b032f27cSSam Leffler u_int32_t rfilt; 25565591b213SSam Leffler 25574bc0e754SSam Leffler /* configure rx filter */ 255868e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 25594bc0e754SSam Leffler ath_hal_setrxfilter(ah, rfilt); 25604bc0e754SSam Leffler 25615591b213SSam Leffler /* configure operational mode */ 2562c42a7b7eSSam Leffler ath_hal_setopmode(ah); 2563c42a7b7eSSam Leffler 256429aca940SSam 
Leffler /* handle any link-level address change */ 256529aca940SSam Leffler ath_hal_setmac(ah, IF_LLADDR(ifp)); 25665591b213SSam Leffler 25675591b213SSam Leffler /* calculate and install multicast filter */ 2568b032f27cSSam Leffler ath_update_mcast(ifp); 25695591b213SSam Leffler } 25705591b213SSam Leffler 2571c42a7b7eSSam Leffler /* 2572c42a7b7eSSam Leffler * Set the slot time based on the current setting. 2573c42a7b7eSSam Leffler */ 2574c42a7b7eSSam Leffler static void 2575c42a7b7eSSam Leffler ath_setslottime(struct ath_softc *sc) 2576c42a7b7eSSam Leffler { 2577b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2578c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2579aaa70f2fSSam Leffler u_int usec; 2580c42a7b7eSSam Leffler 2581aaa70f2fSSam Leffler if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2582aaa70f2fSSam Leffler usec = 13; 2583aaa70f2fSSam Leffler else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2584aaa70f2fSSam Leffler usec = 21; 2585724c193aSSam Leffler else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2586724c193aSSam Leffler /* honor short/long slot time only in 11g */ 2587724c193aSSam Leffler /* XXX shouldn't honor on pure g or turbo g channel */ 2588724c193aSSam Leffler if (ic->ic_flags & IEEE80211_F_SHSLOT) 2589aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_9; 2590aaa70f2fSSam Leffler else 2591aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_20; 2592724c193aSSam Leffler } else 2593724c193aSSam Leffler usec = HAL_SLOT_TIME_9; 2594aaa70f2fSSam Leffler 2595aaa70f2fSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 2596aaa70f2fSSam Leffler "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2597aaa70f2fSSam Leffler __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2598aaa70f2fSSam Leffler ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2599aaa70f2fSSam Leffler 2600aaa70f2fSSam Leffler ath_hal_setslottime(ah, usec); 2601c42a7b7eSSam Leffler sc->sc_updateslot = OK; 2602c42a7b7eSSam Leffler } 2603c42a7b7eSSam Leffler 2604c42a7b7eSSam Leffler /* 2605c42a7b7eSSam Leffler * Callback from the 802.11 layer to update the 2606c42a7b7eSSam Leffler * slot time based on the current setting. 2607c42a7b7eSSam Leffler */ 2608c42a7b7eSSam Leffler static void 2609c42a7b7eSSam Leffler ath_updateslot(struct ifnet *ifp) 2610c42a7b7eSSam Leffler { 2611c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2612b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 2613c42a7b7eSSam Leffler 2614c42a7b7eSSam Leffler /* 2615c42a7b7eSSam Leffler * When not coordinating the BSS, change the hardware 2616c42a7b7eSSam Leffler * immediately. For other operation we defer the change 2617c42a7b7eSSam Leffler * until beacon updates have propagated to the stations. 2618c42a7b7eSSam Leffler */ 261959aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 262059aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) 2621c42a7b7eSSam Leffler sc->sc_updateslot = UPDATE; 2622c42a7b7eSSam Leffler else 2623c42a7b7eSSam Leffler ath_setslottime(sc); 2624c42a7b7eSSam Leffler } 2625c42a7b7eSSam Leffler 2626c42a7b7eSSam Leffler /* 262780d2765fSSam Leffler * Setup a h/w transmit queue for beacons. 
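 * AIFS/CWmin/CWmax are left at the HAL defaults here; they are
 * tuned later by ath_beaconq_config() once the operating mode is known.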
262880d2765fSSam Leffler */ 262980d2765fSSam Leffler static int 263080d2765fSSam Leffler ath_beaconq_setup(struct ath_hal *ah) 263180d2765fSSam Leffler { 263280d2765fSSam Leffler HAL_TXQ_INFO qi; 263380d2765fSSam Leffler 263480d2765fSSam Leffler memset(&qi, 0, sizeof(qi)); 263580d2765fSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 263680d2765fSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 263780d2765fSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 26380f2e86fbSSam Leffler /* NB: for dynamic turbo, don't enable any other interrupts */ 2639bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 264080d2765fSSam Leffler return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 264180d2765fSSam Leffler } 264280d2765fSSam Leffler 264380d2765fSSam Leffler /* 26440f2e86fbSSam Leffler * Setup the transmit queue parameters for the beacon queue. 26450f2e86fbSSam Leffler */ 26460f2e86fbSSam Leffler static int 26470f2e86fbSSam Leffler ath_beaconq_config(struct ath_softc *sc) 26480f2e86fbSSam Leffler { 26490f2e86fbSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) 2650b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 26510f2e86fbSSam Leffler struct ath_hal *ah = sc->sc_ah; 26520f2e86fbSSam Leffler HAL_TXQ_INFO qi; 26530f2e86fbSSam Leffler 26540f2e86fbSSam Leffler ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); 265559aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 265659aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 26570f2e86fbSSam Leffler /* 26580f2e86fbSSam Leffler * Always burst out beacon and CAB traffic. 26590f2e86fbSSam Leffler */ 26600f2e86fbSSam Leffler qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; 26610f2e86fbSSam Leffler qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; 26620f2e86fbSSam Leffler qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; 26630f2e86fbSSam Leffler } else { 26640f2e86fbSSam Leffler struct wmeParams *wmep = 26650f2e86fbSSam Leffler &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; 26660f2e86fbSSam Leffler /* 26670f2e86fbSSam Leffler * Adhoc mode; important thing is to use 2x cwmin. 26680f2e86fbSSam Leffler */ 26690f2e86fbSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 26700f2e86fbSSam Leffler qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 26710f2e86fbSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 26720f2e86fbSSam Leffler } 26730f2e86fbSSam Leffler 26740f2e86fbSSam Leffler if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 26750f2e86fbSSam Leffler device_printf(sc->sc_dev, "unable to update parameters for " 26760f2e86fbSSam Leffler "beacon hardware queue!\n"); 26770f2e86fbSSam Leffler return 0; 26780f2e86fbSSam Leffler } else { 26790f2e86fbSSam Leffler ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 26800f2e86fbSSam Leffler return 1; 26810f2e86fbSSam Leffler } 26820f2e86fbSSam Leffler #undef ATH_EXPONENT_TO_VALUE 26830f2e86fbSSam Leffler } 26840f2e86fbSSam Leffler 26850f2e86fbSSam Leffler /* 2686c42a7b7eSSam Leffler * Allocate and setup an initial beacon frame. 
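 * Any beacon mbuf and node reference already attached to the vap's
 * beacon buffer are released before the new frame is mapped for DMA.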
2687c42a7b7eSSam Leffler */ 26885591b213SSam Leffler static int 26895591b213SSam Leffler ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 26905591b213SSam Leffler { 2691b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 2692b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 26935591b213SSam Leffler struct ath_buf *bf; 26945591b213SSam Leffler struct mbuf *m; 2695c42a7b7eSSam Leffler int error; 26965591b213SSam Leffler 2697b032f27cSSam Leffler bf = avp->av_bcbuf; 26987ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n", 26997ebd03d7SAdrian Chadd __func__, bf->bf_m, bf->bf_node); 2700b032f27cSSam Leffler if (bf->bf_m != NULL) { 2701b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2702b032f27cSSam Leffler m_freem(bf->bf_m); 2703b032f27cSSam Leffler bf->bf_m = NULL; 2704c42a7b7eSSam Leffler } 2705b032f27cSSam Leffler if (bf->bf_node != NULL) { 2706b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 2707b032f27cSSam Leffler bf->bf_node = NULL; 2708b032f27cSSam Leffler } 2709b032f27cSSam Leffler 27105591b213SSam Leffler /* 27115591b213SSam Leffler * NB: the beacon data buffer must be 32-bit aligned; 27125591b213SSam Leffler * we assume the mbuf routines will return us something 27135591b213SSam Leffler * with this alignment (perhaps should assert). 27145591b213SSam Leffler */ 2715b032f27cSSam Leffler m = ieee80211_beacon_alloc(ni, &avp->av_boff); 27165591b213SSam Leffler if (m == NULL) { 2717b032f27cSSam Leffler device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__); 27185591b213SSam Leffler sc->sc_stats.ast_be_nombuf++; 27195591b213SSam Leffler return ENOMEM; 27205591b213SSam Leffler } 2721f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 2722f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 27235591b213SSam Leffler BUS_DMA_NOWAIT); 2724b032f27cSSam Leffler if (error != 0) { 2725b032f27cSSam Leffler device_printf(sc->sc_dev, 2726b032f27cSSam Leffler "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n", 2727b032f27cSSam Leffler __func__, error); 2728b032f27cSSam Leffler m_freem(m); 2729b032f27cSSam Leffler return error; 2730b032f27cSSam Leffler } 2731b032f27cSSam Leffler 2732b032f27cSSam Leffler /* 2733b032f27cSSam Leffler * Calculate a TSF adjustment factor required for staggered 2734b032f27cSSam Leffler * beacons. Note that we assume the format of the beacon 2735b032f27cSSam Leffler * frame leaves the tstamp field immediately following the 2736b032f27cSSam Leffler * header. 2737b032f27cSSam Leffler */ 2738b032f27cSSam Leffler if (sc->sc_stagbeacons && avp->av_bslot > 0) { 2739b032f27cSSam Leffler uint64_t tsfadjust; 2740b032f27cSSam Leffler struct ieee80211_frame *wh; 2741b032f27cSSam Leffler 2742b032f27cSSam Leffler /* 2743b032f27cSSam Leffler * The beacon interval is in TU's; the TSF is in usecs. 2744b032f27cSSam Leffler * We figure out how many TU's to add to align the timestamp 2745b032f27cSSam Leffler * then convert to TSF units and handle byte swapping before 2746b032f27cSSam Leffler * inserting it in the frame. The hardware will then add this 2747b032f27cSSam Leffler * each time a beacon frame is sent. Note that we align vap's 2748b032f27cSSam Leffler * 1..N and leave vap 0 untouched. This means vap 0 has a 2749b032f27cSSam Leffler * timestamp in one beacon interval while the others get a 2750b032f27cSSam Leffler * timstamp aligned to the next interval. 
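 * For example, with intval = 100 TU and (say) ATH_BCBUF = 4, slot 1
 * gets tsfadjust = 100 * (4 - 1) / 4 = 75 TU, i.e. 75 << 10 usec.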
2751b032f27cSSam Leffler */ 2752b032f27cSSam Leffler tsfadjust = ni->ni_intval * 2753b032f27cSSam Leffler (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF; 2754b032f27cSSam Leffler tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */ 2755b032f27cSSam Leffler 2756b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2757b032f27cSSam Leffler "%s: %s beacons bslot %d intval %u tsfadjust %llu\n", 2758b032f27cSSam Leffler __func__, sc->sc_stagbeacons ? "stagger" : "burst", 27593627e321SSam Leffler avp->av_bslot, ni->ni_intval, 27603627e321SSam Leffler (long long unsigned) le64toh(tsfadjust)); 2761b032f27cSSam Leffler 2762b032f27cSSam Leffler wh = mtod(m, struct ieee80211_frame *); 2763b032f27cSSam Leffler memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); 2764b032f27cSSam Leffler } 2765c42a7b7eSSam Leffler bf->bf_m = m; 2766f818612bSSam Leffler bf->bf_node = ieee80211_ref_node(ni); 2767b032f27cSSam Leffler 2768b032f27cSSam Leffler return 0; 27695591b213SSam Leffler } 2770c42a7b7eSSam Leffler 2771c42a7b7eSSam Leffler /* 2772c42a7b7eSSam Leffler * Setup the beacon frame for transmit. 2773c42a7b7eSSam Leffler */ 2774c42a7b7eSSam Leffler static void 2775c42a7b7eSSam Leffler ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) 2776c42a7b7eSSam Leffler { 2777c42a7b7eSSam Leffler #define USE_SHPREAMBLE(_ic) \ 2778c42a7b7eSSam Leffler (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ 2779c42a7b7eSSam Leffler == IEEE80211_F_SHPREAMBLE) 2780c42a7b7eSSam Leffler struct ieee80211_node *ni = bf->bf_node; 2781c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 2782c42a7b7eSSam Leffler struct mbuf *m = bf->bf_m; 2783c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2784c42a7b7eSSam Leffler struct ath_desc *ds; 2785c42a7b7eSSam Leffler int flags, antenna; 278655f63772SSam Leffler const HAL_RATE_TABLE *rt; 278755f63772SSam Leffler u_int8_t rix, rate; 2788c42a7b7eSSam Leffler 27894a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", 2790c42a7b7eSSam Leffler __func__, m, m->m_len); 27915591b213SSam Leffler 27925591b213SSam Leffler /* setup descriptors */ 27935591b213SSam Leffler ds = bf->bf_desc; 27946edf1dc7SAdrian Chadd bf->bf_last = bf; 27956edf1dc7SAdrian Chadd bf->bf_lastds = ds; 27965591b213SSam Leffler 2797c42a7b7eSSam Leffler flags = HAL_TXDESC_NOACK; 2798c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { 2799c42a7b7eSSam Leffler ds->ds_link = bf->bf_daddr; /* self-linked */ 2800c42a7b7eSSam Leffler flags |= HAL_TXDESC_VEOL; 2801c42a7b7eSSam Leffler /* 2802c42a7b7eSSam Leffler * Let hardware handle antenna switching. 2803c42a7b7eSSam Leffler */ 28044866e6c2SSam Leffler antenna = sc->sc_txantenna; 2805c42a7b7eSSam Leffler } else { 28065591b213SSam Leffler ds->ds_link = 0; 2807c42a7b7eSSam Leffler /* 2808c42a7b7eSSam Leffler * Switch antenna every 4 beacons. 2809c42a7b7eSSam Leffler * XXX assumes two antenna 2810c42a7b7eSSam Leffler */ 2811b032f27cSSam Leffler if (sc->sc_txantenna != 0) 2812b032f27cSSam Leffler antenna = sc->sc_txantenna; 2813b032f27cSSam Leffler else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0) 2814b032f27cSSam Leffler antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1); 2815b032f27cSSam Leffler else 2816b032f27cSSam Leffler antenna = (sc->sc_stats.ast_be_xmit & 4 ? 
2 : 1); 2817c42a7b7eSSam Leffler } 2818c42a7b7eSSam Leffler 2819c42a7b7eSSam Leffler KASSERT(bf->bf_nseg == 1, 2820c42a7b7eSSam Leffler ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); 28215591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 28225591b213SSam Leffler /* 28235591b213SSam Leffler * Calculate rate code. 28245591b213SSam Leffler * XXX everything at min xmit rate 28255591b213SSam Leffler */ 2826b032f27cSSam Leffler rix = 0; 282755f63772SSam Leffler rt = sc->sc_currates; 282855f63772SSam Leffler rate = rt->info[rix].rateCode; 2829c42a7b7eSSam Leffler if (USE_SHPREAMBLE(ic)) 283055f63772SSam Leffler rate |= rt->info[rix].shortPreamble; 28315591b213SSam Leffler ath_hal_setuptxdesc(ah, ds 2832c42a7b7eSSam Leffler , m->m_len + IEEE80211_CRC_LEN /* frame length */ 28335591b213SSam Leffler , sizeof(struct ieee80211_frame)/* header length */ 28345591b213SSam Leffler , HAL_PKT_TYPE_BEACON /* Atheros packet type */ 2835c42a7b7eSSam Leffler , ni->ni_txpower /* txpower XXX */ 28365591b213SSam Leffler , rate, 1 /* series 0 rate/tries */ 28375591b213SSam Leffler , HAL_TXKEYIX_INVALID /* no encryption */ 2838c42a7b7eSSam Leffler , antenna /* antenna mode */ 2839c42a7b7eSSam Leffler , flags /* no ack, veol for beacons */ 28405591b213SSam Leffler , 0 /* rts/cts rate */ 28415591b213SSam Leffler , 0 /* rts/cts duration */ 28425591b213SSam Leffler ); 28435591b213SSam Leffler /* NB: beacon's BufLen must be a multiple of 4 bytes */ 28445591b213SSam Leffler ath_hal_filltxdesc(ah, ds 2845c42a7b7eSSam Leffler , roundup(m->m_len, 4) /* buffer length */ 28465591b213SSam Leffler , AH_TRUE /* first segment */ 28475591b213SSam Leffler , AH_TRUE /* last segment */ 2848c42a7b7eSSam Leffler , ds /* first descriptor */ 28495591b213SSam Leffler ); 2850b032f27cSSam Leffler #if 0 2851b032f27cSSam Leffler ath_desc_swap(ds); 2852b032f27cSSam Leffler #endif 2853c42a7b7eSSam Leffler #undef USE_SHPREAMBLE 28545591b213SSam Leffler } 28555591b213SSam Leffler 2856b105a069SSam Leffler static void 2857b032f27cSSam Leffler ath_beacon_update(struct ieee80211vap *vap, int item) 2858b105a069SSam Leffler { 2859b032f27cSSam Leffler struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff; 2860b105a069SSam Leffler 2861b105a069SSam Leffler setbit(bo->bo_flags, item); 2862b105a069SSam Leffler } 2863b105a069SSam Leffler 2864c42a7b7eSSam Leffler /* 2865622b3fd2SSam Leffler * Append the contents of src to dst; both queues 2866622b3fd2SSam Leffler * are assumed to be locked. 2867622b3fd2SSam Leffler */ 2868622b3fd2SSam Leffler static void 2869622b3fd2SSam Leffler ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2870622b3fd2SSam Leffler { 28716b349e5aSAdrian Chadd TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2872622b3fd2SSam Leffler dst->axq_link = src->axq_link; 2873622b3fd2SSam Leffler src->axq_link = NULL; 2874622b3fd2SSam Leffler dst->axq_depth += src->axq_depth; 28756edf1dc7SAdrian Chadd dst->axq_aggr_depth += src->axq_aggr_depth; 2876622b3fd2SSam Leffler src->axq_depth = 0; 28776edf1dc7SAdrian Chadd src->axq_aggr_depth = 0; 2878622b3fd2SSam Leffler } 2879622b3fd2SSam Leffler 2880622b3fd2SSam Leffler /* 2881c42a7b7eSSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates to the 2882c42a7b7eSSam Leffler * frame contents are done as needed and the slot time is 2883c42a7b7eSSam Leffler * also adjusted based on current state. 
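 * If too many beacons are missed in a row the stuck-beacon task is
 * scheduled to reset the hardware.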
2884c42a7b7eSSam Leffler */ 28855591b213SSam Leffler static void 28865591b213SSam Leffler ath_beacon_proc(void *arg, int pending) 28875591b213SSam Leffler { 28885591b213SSam Leffler struct ath_softc *sc = arg; 28895591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 2890b032f27cSSam Leffler struct ieee80211vap *vap; 2891b032f27cSSam Leffler struct ath_buf *bf; 2892b032f27cSSam Leffler int slot, otherant; 2893b032f27cSSam Leffler uint32_t bfaddr; 28945591b213SSam Leffler 2895c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2896c42a7b7eSSam Leffler __func__, pending); 2897c42a7b7eSSam Leffler /* 2898c42a7b7eSSam Leffler * Check if the previous beacon has gone out. If 2899c66c48cbSSam Leffler * not don't try to post another, skip this period 2900c66c48cbSSam Leffler * and wait for the next. Missed beacons indicate 2901c66c48cbSSam Leffler * a problem and should not occur. If we miss too 2902c66c48cbSSam Leffler * many consecutive beacons reset the device. 2903c42a7b7eSSam Leffler */ 2904c42a7b7eSSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2905c42a7b7eSSam Leffler sc->sc_bmisscount++; 29067ec4e6b8SAdrian Chadd sc->sc_stats.ast_be_missed++; 29074a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2908c42a7b7eSSam Leffler "%s: missed %u consecutive beacons\n", 2909c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2910a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold) 29110bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2912c42a7b7eSSam Leffler return; 2913c42a7b7eSSam Leffler } 2914c42a7b7eSSam Leffler if (sc->sc_bmisscount != 0) { 2915c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 2916c42a7b7eSSam Leffler "%s: resume beacon xmit after %u misses\n", 2917c42a7b7eSSam Leffler __func__, sc->sc_bmisscount); 2918c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 2919c42a7b7eSSam Leffler } 2920c42a7b7eSSam Leffler 2921b032f27cSSam Leffler if (sc->sc_stagbeacons) { /* staggered beacons */ 2922b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2923b032f27cSSam Leffler uint32_t tsftu; 2924b032f27cSSam Leffler 2925b032f27cSSam Leffler tsftu = ath_hal_gettsf32(ah) >> 10; 2926b032f27cSSam Leffler /* XXX lintval */ 2927b032f27cSSam Leffler slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; 2928b032f27cSSam Leffler vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; 2929b032f27cSSam Leffler bfaddr = 0; 2930309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2931b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 2932b032f27cSSam Leffler if (bf != NULL) 2933b032f27cSSam Leffler bfaddr = bf->bf_daddr; 2934b032f27cSSam Leffler } 2935b032f27cSSam Leffler } else { /* burst'd beacons */ 2936b032f27cSSam Leffler uint32_t *bflink = &bfaddr; 2937b032f27cSSam Leffler 2938b032f27cSSam Leffler for (slot = 0; slot < ATH_BCBUF; slot++) { 2939b032f27cSSam Leffler vap = sc->sc_bslot[slot]; 2940309a3e45SSam Leffler if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 2941b032f27cSSam Leffler bf = ath_beacon_generate(sc, vap); 2942b032f27cSSam Leffler if (bf != NULL) { 2943b032f27cSSam Leffler *bflink = bf->bf_daddr; 2944b032f27cSSam Leffler bflink = &bf->bf_desc->ds_link; 2945c42a7b7eSSam Leffler } 2946c42a7b7eSSam Leffler } 2947b032f27cSSam Leffler } 2948b032f27cSSam Leffler *bflink = 0; /* terminate list */ 2949622b3fd2SSam Leffler } 2950c42a7b7eSSam Leffler 2951c42a7b7eSSam Leffler /* 2952c42a7b7eSSam Leffler * Handle slot time change when a non-ERP station joins/leaves 2953c42a7b7eSSam 
Leffler * an 11g network. The 802.11 layer notifies us via callback, 2954c42a7b7eSSam Leffler * we mark updateslot, then wait one beacon before effecting 2955c42a7b7eSSam Leffler * the change. This gives associated stations at least one 2956c42a7b7eSSam Leffler * beacon interval to note the state change. 2957c42a7b7eSSam Leffler */ 2958c42a7b7eSSam Leffler /* XXX locking */ 2959b032f27cSSam Leffler if (sc->sc_updateslot == UPDATE) { 2960c42a7b7eSSam Leffler sc->sc_updateslot = COMMIT; /* commit next beacon */ 2961b032f27cSSam Leffler sc->sc_slotupdate = slot; 2962b032f27cSSam Leffler } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 2963c42a7b7eSSam Leffler ath_setslottime(sc); /* commit change to h/w */ 2964c42a7b7eSSam Leffler 2965c42a7b7eSSam Leffler /* 2966c42a7b7eSSam Leffler * Check recent per-antenna transmit statistics and flip 2967c42a7b7eSSam Leffler * the default antenna if noticeably more frames went out 2968c42a7b7eSSam Leffler * on the non-default antenna. 2969c42a7b7eSSam Leffler * XXX assumes 2 anntenae 2970c42a7b7eSSam Leffler */ 2971b032f27cSSam Leffler if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) { 2972c42a7b7eSSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1; 2973c42a7b7eSSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 2974c42a7b7eSSam Leffler ath_setdefantenna(sc, otherant); 2975c42a7b7eSSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 2976b032f27cSSam Leffler } 2977c42a7b7eSSam Leffler 2978b032f27cSSam Leffler if (bfaddr != 0) { 2979c42a7b7eSSam Leffler /* 2980c42a7b7eSSam Leffler * Stop any current dma and put the new frame on the queue. 2981c42a7b7eSSam Leffler * This should never fail since we check above that no frames 2982c42a7b7eSSam Leffler * are still pending on the queue. 2983c42a7b7eSSam Leffler */ 29845591b213SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 2985c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 2986c42a7b7eSSam Leffler "%s: beacon queue %u did not stop?\n", 2987c42a7b7eSSam Leffler __func__, sc->sc_bhalq); 29885591b213SSam Leffler } 2989b032f27cSSam Leffler /* NB: cabq traffic should already be queued and primed */ 2990b032f27cSSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); 2991b032f27cSSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 2992b032f27cSSam Leffler 2993b032f27cSSam Leffler sc->sc_stats.ast_be_xmit++; 2994b032f27cSSam Leffler } 2995b032f27cSSam Leffler } 2996b032f27cSSam Leffler 2997b032f27cSSam Leffler static struct ath_buf * 2998b032f27cSSam Leffler ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) 2999b032f27cSSam Leffler { 3000b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3001b032f27cSSam Leffler struct ath_txq *cabq = sc->sc_cabq; 3002b032f27cSSam Leffler struct ath_buf *bf; 3003b032f27cSSam Leffler struct mbuf *m; 3004b032f27cSSam Leffler int nmcastq, error; 3005b032f27cSSam Leffler 3006309a3e45SSam Leffler KASSERT(vap->iv_state >= IEEE80211_S_RUN, 3007b032f27cSSam Leffler ("not running, state %d", vap->iv_state)); 3008b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3009b032f27cSSam Leffler 3010b032f27cSSam Leffler /* 3011b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3012b032f27cSSam Leffler * non-zero then we need to remap the memory because 3013b032f27cSSam Leffler * the beacon frame changed size (probably because 3014b032f27cSSam Leffler * of the TIM bitmap). 
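 * The CAB (multicast) handling further below is gated on bit 0 of
 * the TIM bitmap control byte (bo_tim[4]).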
3015b032f27cSSam Leffler */ 3016b032f27cSSam Leffler bf = avp->av_bcbuf; 3017b032f27cSSam Leffler m = bf->bf_m; 3018b032f27cSSam Leffler nmcastq = avp->av_mcastq.axq_depth; 3019b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { 3020b032f27cSSam Leffler /* XXX too conservative? */ 3021b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3022b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3023b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3024b032f27cSSam Leffler BUS_DMA_NOWAIT); 3025b032f27cSSam Leffler if (error != 0) { 3026b032f27cSSam Leffler if_printf(vap->iv_ifp, 3027b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3028b032f27cSSam Leffler __func__, error); 3029b032f27cSSam Leffler return NULL; 3030b032f27cSSam Leffler } 3031b032f27cSSam Leffler } 3032b032f27cSSam Leffler if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { 3033b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 3034b032f27cSSam Leffler "%s: cabq did not drain, mcastq %u cabq %u\n", 3035b032f27cSSam Leffler __func__, nmcastq, cabq->axq_depth); 3036b032f27cSSam Leffler sc->sc_stats.ast_cabq_busy++; 3037b032f27cSSam Leffler if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { 3038b032f27cSSam Leffler /* 3039b032f27cSSam Leffler * CABQ traffic from a previous vap is still pending. 3040b032f27cSSam Leffler * We must drain the q before this beacon frame goes 3041b032f27cSSam Leffler * out as otherwise this vap's stations will get cab 3042b032f27cSSam Leffler * frames from a different vap. 3043b032f27cSSam Leffler * XXX could be slow causing us to miss DBA 3044b032f27cSSam Leffler */ 3045b032f27cSSam Leffler ath_tx_draintxq(sc, cabq); 3046b032f27cSSam Leffler } 3047b032f27cSSam Leffler } 3048b032f27cSSam Leffler ath_beacon_setup(sc, bf); 30495591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 30505591b213SSam Leffler 3051c42a7b7eSSam Leffler /* 3052c42a7b7eSSam Leffler * Enable the CAB queue before the beacon queue to 3053c42a7b7eSSam Leffler * insure cab frames are triggered by this beacon. 3054c42a7b7eSSam Leffler */ 3055b032f27cSSam Leffler if (avp->av_boff.bo_tim[4] & 1) { 3056b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3057b032f27cSSam Leffler 3058f3af83f7SSam Leffler /* NB: only at DTIM */ 3059622b3fd2SSam Leffler ATH_TXQ_LOCK(cabq); 3060b032f27cSSam Leffler ATH_TXQ_LOCK(&avp->av_mcastq); 3061622b3fd2SSam Leffler if (nmcastq) { 3062622b3fd2SSam Leffler struct ath_buf *bfm; 3063622b3fd2SSam Leffler 3064622b3fd2SSam Leffler /* 3065622b3fd2SSam Leffler * Move frames from the s/w mcast q to the h/w cab q. 3066b032f27cSSam Leffler * XXX MORE_DATA bit 3067622b3fd2SSam Leffler */ 30686b349e5aSAdrian Chadd bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q); 3069622b3fd2SSam Leffler if (cabq->axq_link != NULL) { 3070622b3fd2SSam Leffler *cabq->axq_link = bfm->bf_daddr; 3071622b3fd2SSam Leffler } else 3072622b3fd2SSam Leffler ath_hal_puttxbuf(ah, cabq->axq_qnum, 3073622b3fd2SSam Leffler bfm->bf_daddr); 3074b032f27cSSam Leffler ath_txqmove(cabq, &avp->av_mcastq); 3075622b3fd2SSam Leffler 3076622b3fd2SSam Leffler sc->sc_stats.ast_cabq_xmit += nmcastq; 3077622b3fd2SSam Leffler } 3078622b3fd2SSam Leffler /* NB: gated by beacon so safe to start here */ 30796b349e5aSAdrian Chadd if (! 
TAILQ_EMPTY(&(cabq->axq_q))) 3080622b3fd2SSam Leffler ath_hal_txstart(ah, cabq->axq_qnum); 3081b032f27cSSam Leffler ATH_TXQ_UNLOCK(&avp->av_mcastq); 30827b15790aSAdrian Chadd ATH_TXQ_UNLOCK(cabq); 3083622b3fd2SSam Leffler } 3084b032f27cSSam Leffler return bf; 3085b032f27cSSam Leffler } 3086b032f27cSSam Leffler 3087b032f27cSSam Leffler static void 3088b032f27cSSam Leffler ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) 3089b032f27cSSam Leffler { 3090b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 3091b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 3092b032f27cSSam Leffler struct ath_buf *bf; 3093b032f27cSSam Leffler struct mbuf *m; 3094b032f27cSSam Leffler int error; 3095b032f27cSSam Leffler 3096b032f27cSSam Leffler KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); 3097b032f27cSSam Leffler 3098b032f27cSSam Leffler /* 3099b032f27cSSam Leffler * Update dynamic beacon contents. If this returns 3100b032f27cSSam Leffler * non-zero then we need to remap the memory because 3101b032f27cSSam Leffler * the beacon frame changed size (probably because 3102b032f27cSSam Leffler * of the TIM bitmap). 3103b032f27cSSam Leffler */ 3104b032f27cSSam Leffler bf = avp->av_bcbuf; 3105b032f27cSSam Leffler m = bf->bf_m; 3106b032f27cSSam Leffler if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { 3107b032f27cSSam Leffler /* XXX too conservative? */ 3108b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3109b032f27cSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 3110b032f27cSSam Leffler bf->bf_segs, &bf->bf_nseg, 3111b032f27cSSam Leffler BUS_DMA_NOWAIT); 3112b032f27cSSam Leffler if (error != 0) { 3113b032f27cSSam Leffler if_printf(vap->iv_ifp, 3114b032f27cSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", 3115b032f27cSSam Leffler __func__, error); 3116b032f27cSSam Leffler return; 3117b032f27cSSam Leffler } 3118b032f27cSSam Leffler } 3119b032f27cSSam Leffler ath_beacon_setup(sc, bf); 3120b032f27cSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 3121b032f27cSSam Leffler 3122b032f27cSSam Leffler /* NB: caller is known to have already stopped tx dma */ 31235591b213SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 31245591b213SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 31255591b213SSam Leffler } 31265591b213SSam Leffler 3127c42a7b7eSSam Leffler /* 3128c42a7b7eSSam Leffler * Reset the hardware after detecting beacons have stopped. 3129c42a7b7eSSam Leffler */ 3130c42a7b7eSSam Leffler static void 3131c42a7b7eSSam Leffler ath_bstuck_proc(void *arg, int pending) 3132c42a7b7eSSam Leffler { 3133c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3134fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 313516d4de92SAdrian Chadd uint32_t hangs = 0; 313616d4de92SAdrian Chadd 313716d4de92SAdrian Chadd if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 313816d4de92SAdrian Chadd if_printf(ifp, "bb hang detected (0x%x)\n", hangs); 3139c42a7b7eSSam Leffler 3140c42a7b7eSSam Leffler if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 3141c42a7b7eSSam Leffler sc->sc_bmisscount); 3142c2e34459SSam Leffler sc->sc_stats.ast_bstuck++; 314316d4de92SAdrian Chadd /* 314416d4de92SAdrian Chadd * This assumes that there's no simultaneous channel mode change 314516d4de92SAdrian Chadd * occuring. 
314616d4de92SAdrian Chadd */ 3147517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 3148c42a7b7eSSam Leffler } 3149c42a7b7eSSam Leffler 3150c42a7b7eSSam Leffler /* 3151b032f27cSSam Leffler * Reclaim beacon resources and return buffer to the pool. 3152b032f27cSSam Leffler */ 3153b032f27cSSam Leffler static void 3154b032f27cSSam Leffler ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) 3155b032f27cSSam Leffler { 3156b032f27cSSam Leffler 31577ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 31587ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 3159b032f27cSSam Leffler if (bf->bf_m != NULL) { 3160b032f27cSSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3161b032f27cSSam Leffler m_freem(bf->bf_m); 3162b032f27cSSam Leffler bf->bf_m = NULL; 3163b032f27cSSam Leffler } 3164b032f27cSSam Leffler if (bf->bf_node != NULL) { 3165b032f27cSSam Leffler ieee80211_free_node(bf->bf_node); 3166b032f27cSSam Leffler bf->bf_node = NULL; 3167b032f27cSSam Leffler } 31686b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); 3169b032f27cSSam Leffler } 3170b032f27cSSam Leffler 3171b032f27cSSam Leffler /* 3172c42a7b7eSSam Leffler * Reclaim beacon resources. 3173c42a7b7eSSam Leffler */ 31745591b213SSam Leffler static void 31755591b213SSam Leffler ath_beacon_free(struct ath_softc *sc) 31765591b213SSam Leffler { 3177c42a7b7eSSam Leffler struct ath_buf *bf; 31785591b213SSam Leffler 31796b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 31807ebd03d7SAdrian Chadd DPRINTF(sc, ATH_DEBUG_NODE, 31817ebd03d7SAdrian Chadd "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 31827ebd03d7SAdrian Chadd __func__, bf, bf->bf_m, bf->bf_node); 31835591b213SSam Leffler if (bf->bf_m != NULL) { 31845591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 31855591b213SSam Leffler m_freem(bf->bf_m); 31865591b213SSam Leffler bf->bf_m = NULL; 3187f818612bSSam Leffler } 3188f818612bSSam Leffler if (bf->bf_node != NULL) { 3189f818612bSSam Leffler ieee80211_free_node(bf->bf_node); 31905591b213SSam Leffler bf->bf_node = NULL; 31915591b213SSam Leffler } 31925591b213SSam Leffler } 3193f818612bSSam Leffler } 31945591b213SSam Leffler 31955591b213SSam Leffler /* 31965591b213SSam Leffler * Configure the beacon and sleep timers. 31975591b213SSam Leffler * 31985591b213SSam Leffler * When operating as an AP this resets the TSF and sets 31995591b213SSam Leffler * up the hardware to notify us when we need to issue beacons. 32005591b213SSam Leffler * 32015591b213SSam Leffler * When operating in station mode this sets up the beacon 32025591b213SSam Leffler * timers according to the timestamp of the last received 32035591b213SSam Leffler * beacon and the current TSF, configures PCF and DTIM 32045591b213SSam Leffler * handling, programs the sleep registers so the hardware 32055591b213SSam Leffler * will wakeup in time to receive beacons, and configures 32065591b213SSam Leffler * the beacon miss handling so we'll receive a BMISS 32075591b213SSam Leffler * interrupt when we stop seeing beacons from the AP 32085591b213SSam Leffler * we've associated with. 
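 * When software beacon miss is in use (sc_swbmiss) the station-mode
 * BMISS setup below is skipped.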
32095591b213SSam Leffler */ 32105591b213SSam Leffler static void 3211b032f27cSSam Leffler ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 32125591b213SSam Leffler { 321380d939bfSSam Leffler #define TSF_TO_TU(_h,_l) \ 321480d939bfSSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 321580d939bfSSam Leffler #define FUDGE 2 32165591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 3217b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3218b032f27cSSam Leffler struct ieee80211_node *ni; 321980d939bfSSam Leffler u_int32_t nexttbtt, intval, tsftu; 322080d939bfSSam Leffler u_int64_t tsf; 32215591b213SSam Leffler 3222b032f27cSSam Leffler if (vap == NULL) 3223b032f27cSSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 3224b032f27cSSam Leffler ni = vap->iv_bss; 3225b032f27cSSam Leffler 32268371372bSSam Leffler /* extract tstamp from last beacon and convert to TU */ 32278371372bSSam Leffler nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 32288371372bSSam Leffler LE_READ_4(ni->ni_tstamp.data)); 322959aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 323059aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3231b032f27cSSam Leffler /* 323259aa14a9SRui Paulo * For multi-bss ap/mesh support beacons are either staggered 3233b032f27cSSam Leffler * evenly over N slots or burst together. For the former 3234b032f27cSSam Leffler * arrange for the SWBA to be delivered for each slot. 3235b032f27cSSam Leffler * Slots that are not occupied will generate nothing. 3236b032f27cSSam Leffler */ 32378371372bSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 32384bacf7c1SSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3239b032f27cSSam Leffler if (sc->sc_stagbeacons) 3240b032f27cSSam Leffler intval /= ATH_BCBUF; 3241b032f27cSSam Leffler } else { 3242b032f27cSSam Leffler /* NB: the beacon interval is kept internally in TU's */ 3243b032f27cSSam Leffler intval = ni->ni_intval & HAL_BEACON_PERIOD; 3244b032f27cSSam Leffler } 3245a6c992f4SSam Leffler if (nexttbtt == 0) /* e.g. for ap mode */ 3246a6c992f4SSam Leffler nexttbtt = intval; 3247a6c992f4SSam Leffler else if (intval) /* NB: can be 0 for monitor mode */ 3248a6c992f4SSam Leffler nexttbtt = roundup(nexttbtt, intval); 3249a6c992f4SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3250a6c992f4SSam Leffler __func__, nexttbtt, intval, ni->ni_intval); 3251b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 32525591b213SSam Leffler HAL_BEACON_STATE bs; 32538371372bSSam Leffler int dtimperiod, dtimcount; 32548371372bSSam Leffler int cfpperiod, cfpcount; 32555591b213SSam Leffler 32568371372bSSam Leffler /* 32578371372bSSam Leffler * Setup dtim and cfp parameters according to 32588371372bSSam Leffler * last beacon we received (which may be none). 32598371372bSSam Leffler */ 32608371372bSSam Leffler dtimperiod = ni->ni_dtim_period; 32618371372bSSam Leffler if (dtimperiod <= 0) /* NB: 0 if not known */ 32628371372bSSam Leffler dtimperiod = 1; 32638371372bSSam Leffler dtimcount = ni->ni_dtim_count; 32648371372bSSam Leffler if (dtimcount >= dtimperiod) /* NB: sanity check */ 32658371372bSSam Leffler dtimcount = 0; /* XXX? */ 32668371372bSSam Leffler cfpperiod = 1; /* NB: no PCF support yet */ 32678371372bSSam Leffler cfpcount = 0; 32688371372bSSam Leffler /* 32698371372bSSam Leffler * Pull nexttbtt forward to reflect the current 32708371372bSSam Leffler * TSF and calculate dtim+cfp state for the result. 
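 * (FUDGE TU of slop is added to the converted TSF so the target
 * TBTT lands safely in the future.)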
32718371372bSSam Leffler */ 32728371372bSSam Leffler tsf = ath_hal_gettsf64(ah); 327380d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 32748371372bSSam Leffler do { 32758371372bSSam Leffler nexttbtt += intval; 32768371372bSSam Leffler if (--dtimcount < 0) { 32778371372bSSam Leffler dtimcount = dtimperiod - 1; 32788371372bSSam Leffler if (--cfpcount < 0) 32798371372bSSam Leffler cfpcount = cfpperiod - 1; 32808371372bSSam Leffler } 32818371372bSSam Leffler } while (nexttbtt < tsftu); 32825591b213SSam Leffler memset(&bs, 0, sizeof(bs)); 3283a6c992f4SSam Leffler bs.bs_intval = intval; 32845591b213SSam Leffler bs.bs_nexttbtt = nexttbtt; 32858371372bSSam Leffler bs.bs_dtimperiod = dtimperiod*intval; 32868371372bSSam Leffler bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; 32878371372bSSam Leffler bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 32888371372bSSam Leffler bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 32898371372bSSam Leffler bs.bs_cfpmaxduration = 0; 32908371372bSSam Leffler #if 0 32915591b213SSam Leffler /* 3292c42a7b7eSSam Leffler * The 802.11 layer records the offset to the DTIM 3293c42a7b7eSSam Leffler * bitmap while receiving beacons; use it here to 3294c42a7b7eSSam Leffler * enable h/w detection of our AID being marked in 3295c42a7b7eSSam Leffler * the bitmap vector (to indicate frames for us are 3296c42a7b7eSSam Leffler * pending at the AP). 32978371372bSSam Leffler * XXX do DTIM handling in s/w to WAR old h/w bugs 32988371372bSSam Leffler * XXX enable based on h/w rev for newer chips 3299c42a7b7eSSam Leffler */ 3300c42a7b7eSSam Leffler bs.bs_timoffset = ni->ni_timoff; 33018371372bSSam Leffler #endif 3302c42a7b7eSSam Leffler /* 33035591b213SSam Leffler * Calculate the number of consecutive beacons to miss 330468e8e04eSSam Leffler * before taking a BMISS interrupt. 33055591b213SSam Leffler * Note that we clamp the result to at most 10 beacons. 33065591b213SSam Leffler */ 3307b032f27cSSam Leffler bs.bs_bmissthreshold = vap->iv_bmissthreshold; 33085591b213SSam Leffler if (bs.bs_bmissthreshold > 10) 33095591b213SSam Leffler bs.bs_bmissthreshold = 10; 33105591b213SSam Leffler else if (bs.bs_bmissthreshold <= 0) 33115591b213SSam Leffler bs.bs_bmissthreshold = 1; 33125591b213SSam Leffler 33135591b213SSam Leffler /* 33145591b213SSam Leffler * Calculate sleep duration. The configuration is 33155591b213SSam Leffler * given in ms. We insure a multiple of the beacon 33165591b213SSam Leffler * period is used. Also, if the sleep duration is 33175591b213SSam Leffler * greater than the DTIM period then it makes senses 33185591b213SSam Leffler * to make it a multiple of that. 
33195591b213SSam Leffler * 33205591b213SSam Leffler * XXX fixed at 100ms 33215591b213SSam Leffler */ 33224bacf7c1SSam Leffler bs.bs_sleepduration = 33234bacf7c1SSam Leffler roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); 33245591b213SSam Leffler if (bs.bs_sleepduration > bs.bs_dtimperiod) 33255591b213SSam Leffler bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); 33265591b213SSam Leffler 3327c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 33288371372bSSam Leffler "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" 33295591b213SSam Leffler , __func__ 33308371372bSSam Leffler , tsf, tsftu 33315591b213SSam Leffler , bs.bs_intval 33325591b213SSam Leffler , bs.bs_nexttbtt 33335591b213SSam Leffler , bs.bs_dtimperiod 33345591b213SSam Leffler , bs.bs_nextdtim 33355591b213SSam Leffler , bs.bs_bmissthreshold 33365591b213SSam Leffler , bs.bs_sleepduration 3337c42a7b7eSSam Leffler , bs.bs_cfpperiod 3338c42a7b7eSSam Leffler , bs.bs_cfpmaxduration 3339c42a7b7eSSam Leffler , bs.bs_cfpnext 3340c42a7b7eSSam Leffler , bs.bs_timoffset 3341c42a7b7eSSam Leffler ); 33425591b213SSam Leffler ath_hal_intrset(ah, 0); 3343c42a7b7eSSam Leffler ath_hal_beacontimers(ah, &bs); 33445591b213SSam Leffler sc->sc_imask |= HAL_INT_BMISS; 33455591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 33465591b213SSam Leffler } else { 33475591b213SSam Leffler ath_hal_intrset(ah, 0); 3348a6c992f4SSam Leffler if (nexttbtt == intval) 3349c42a7b7eSSam Leffler intval |= HAL_BEACON_RESET_TSF; 3350c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS) { 3351c42a7b7eSSam Leffler /* 3352c42a7b7eSSam Leffler * In IBSS mode enable the beacon timers but only 3353c42a7b7eSSam Leffler * enable SWBA interrupts if we need to manually 3354c42a7b7eSSam Leffler * prepare beacon frames. Otherwise we use a 3355c42a7b7eSSam Leffler * self-linked tx descriptor and let the hardware 3356c42a7b7eSSam Leffler * deal with things. 3357c42a7b7eSSam Leffler */ 3358c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 3359c42a7b7eSSam Leffler if (!sc->sc_hasveol) 3360c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_SWBA; 336180d939bfSSam Leffler if ((intval & HAL_BEACON_RESET_TSF) == 0) { 336280d939bfSSam Leffler /* 336380d939bfSSam Leffler * Pull nexttbtt forward to reflect 336480d939bfSSam Leffler * the current TSF. 336580d939bfSSam Leffler */ 336680d939bfSSam Leffler tsf = ath_hal_gettsf64(ah); 336780d939bfSSam Leffler tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 336880d939bfSSam Leffler do { 336980d939bfSSam Leffler nexttbtt += intval; 337080d939bfSSam Leffler } while (nexttbtt < tsftu); 337180d939bfSSam Leffler } 33720f2e86fbSSam Leffler ath_beaconq_config(sc); 337359aa14a9SRui Paulo } else if (ic->ic_opmode == IEEE80211_M_HOSTAP || 337459aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) { 3375c42a7b7eSSam Leffler /* 337659aa14a9SRui Paulo * In AP/mesh mode we enable the beacon timers 337759aa14a9SRui Paulo * and SWBA interrupts to prepare beacon frames. 
3378c42a7b7eSSam Leffler */ 3379c42a7b7eSSam Leffler intval |= HAL_BEACON_ENA; 33805591b213SSam Leffler sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 33810f2e86fbSSam Leffler ath_beaconq_config(sc); 3382c42a7b7eSSam Leffler } 3383c42a7b7eSSam Leffler ath_hal_beaconinit(ah, nexttbtt, intval); 3384c42a7b7eSSam Leffler sc->sc_bmisscount = 0; 33855591b213SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 3386c42a7b7eSSam Leffler /* 3387c42a7b7eSSam Leffler * When using a self-linked beacon descriptor in 3388c42a7b7eSSam Leffler * ibss mode load it once here. 3389c42a7b7eSSam Leffler */ 3390c42a7b7eSSam Leffler if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3391b032f27cSSam Leffler ath_beacon_start_adhoc(sc, vap); 33925591b213SSam Leffler } 339380d939bfSSam Leffler sc->sc_syncbeacon = 0; 339480d939bfSSam Leffler #undef FUDGE 33958371372bSSam Leffler #undef TSF_TO_TU 33965591b213SSam Leffler } 33975591b213SSam Leffler 33985591b213SSam Leffler static void 33995591b213SSam Leffler ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 34005591b213SSam Leffler { 34015591b213SSam Leffler bus_addr_t *paddr = (bus_addr_t*) arg; 3402d77367bfSSam Leffler KASSERT(error == 0, ("error %u on bus_dma callback", error)); 34035591b213SSam Leffler *paddr = segs->ds_addr; 34045591b213SSam Leffler } 34055591b213SSam Leffler 34065591b213SSam Leffler static int 3407c42a7b7eSSam Leffler ath_descdma_setup(struct ath_softc *sc, 3408c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head, 3409c42a7b7eSSam Leffler const char *name, int nbuf, int ndesc) 3410c42a7b7eSSam Leffler { 3411c42a7b7eSSam Leffler #define DS2PHYS(_dd, _ds) \ 3412c42a7b7eSSam Leffler ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 341345abcd6cSAdrian Chadd #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 341445abcd6cSAdrian Chadd ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3415fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 341645abcd6cSAdrian Chadd uint8_t *ds; 3417c42a7b7eSSam Leffler struct ath_buf *bf; 3418c42a7b7eSSam Leffler int i, bsize, error; 341945abcd6cSAdrian Chadd int desc_len; 342045abcd6cSAdrian Chadd 342145abcd6cSAdrian Chadd desc_len = sizeof(struct ath_desc); 3422c42a7b7eSSam Leffler 3423c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3424c42a7b7eSSam Leffler __func__, name, nbuf, ndesc); 3425c42a7b7eSSam Leffler 3426c42a7b7eSSam Leffler dd->dd_name = name; 342745abcd6cSAdrian Chadd dd->dd_desc_len = desc_len * nbuf * ndesc; 342845abcd6cSAdrian Chadd 342945abcd6cSAdrian Chadd /* 343045abcd6cSAdrian Chadd * Merlin work-around: 343145abcd6cSAdrian Chadd * Descriptors that cross the 4KB boundary can't be used. 343245abcd6cSAdrian Chadd * Assume one skipped descriptor per 4KB page. 343345abcd6cSAdrian Chadd */ 343445abcd6cSAdrian Chadd if (! ath_hal_split4ktrans(sc->sc_ah)) { 343545abcd6cSAdrian Chadd int numdescpage = 4096 / (desc_len * ndesc); 343645abcd6cSAdrian Chadd dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 343745abcd6cSAdrian Chadd } 3438c42a7b7eSSam Leffler 3439c42a7b7eSSam Leffler /* 3440c42a7b7eSSam Leffler * Setup DMA descriptor area. 
3441c42a7b7eSSam Leffler */ 3442c2175ff5SMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3443c42a7b7eSSam Leffler PAGE_SIZE, 0, /* alignment, bounds */ 3444c42a7b7eSSam Leffler BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3445c42a7b7eSSam Leffler BUS_SPACE_MAXADDR, /* highaddr */ 3446c42a7b7eSSam Leffler NULL, NULL, /* filter, filterarg */ 3447c42a7b7eSSam Leffler dd->dd_desc_len, /* maxsize */ 3448c42a7b7eSSam Leffler 1, /* nsegments */ 34496ccb8ea7SSam Leffler dd->dd_desc_len, /* maxsegsize */ 3450c42a7b7eSSam Leffler BUS_DMA_ALLOCNOW, /* flags */ 3451c42a7b7eSSam Leffler NULL, /* lockfunc */ 3452c42a7b7eSSam Leffler NULL, /* lockarg */ 3453c42a7b7eSSam Leffler &dd->dd_dmat); 3454c42a7b7eSSam Leffler if (error != 0) { 3455c42a7b7eSSam Leffler if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3456c42a7b7eSSam Leffler return error; 3457c42a7b7eSSam Leffler } 3458c42a7b7eSSam Leffler 3459c42a7b7eSSam Leffler /* allocate descriptors */ 3460c42a7b7eSSam Leffler error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 3461c42a7b7eSSam Leffler if (error != 0) { 3462c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s descriptors, " 3463c42a7b7eSSam Leffler "error %u\n", dd->dd_name, error); 3464c42a7b7eSSam Leffler goto fail0; 3465c42a7b7eSSam Leffler } 3466c42a7b7eSSam Leffler 3467c42a7b7eSSam Leffler error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 34680553a01fSSam Leffler BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 34690553a01fSSam Leffler &dd->dd_dmamap); 3470c42a7b7eSSam Leffler if (error != 0) { 3471c42a7b7eSSam Leffler if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3472c42a7b7eSSam Leffler "error %u\n", nbuf * ndesc, dd->dd_name, error); 3473c42a7b7eSSam Leffler goto fail1; 3474c42a7b7eSSam Leffler } 3475c42a7b7eSSam Leffler 3476c42a7b7eSSam Leffler error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3477c42a7b7eSSam Leffler dd->dd_desc, dd->dd_desc_len, 3478c42a7b7eSSam Leffler ath_load_cb, &dd->dd_desc_paddr, 3479c42a7b7eSSam Leffler BUS_DMA_NOWAIT); 3480c42a7b7eSSam Leffler if (error != 0) { 3481c42a7b7eSSam Leffler if_printf(ifp, "unable to map %s descriptors, error %u\n", 3482c42a7b7eSSam Leffler dd->dd_name, error); 3483c42a7b7eSSam Leffler goto fail2; 3484c42a7b7eSSam Leffler } 3485c42a7b7eSSam Leffler 348645abcd6cSAdrian Chadd ds = (uint8_t *) dd->dd_desc; 3487c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3488c42a7b7eSSam Leffler __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 3489c42a7b7eSSam Leffler (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 3490c42a7b7eSSam Leffler 3491ebecf802SSam Leffler /* allocate rx buffers */ 3492c42a7b7eSSam Leffler bsize = sizeof(struct ath_buf) * nbuf; 3493c42a7b7eSSam Leffler bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 3494c42a7b7eSSam Leffler if (bf == NULL) { 3495c42a7b7eSSam Leffler if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3496c42a7b7eSSam Leffler dd->dd_name, bsize); 3497c42a7b7eSSam Leffler goto fail3; 3498c42a7b7eSSam Leffler } 3499c42a7b7eSSam Leffler dd->dd_bufptr = bf; 3500c42a7b7eSSam Leffler 35016b349e5aSAdrian Chadd TAILQ_INIT(head); 350245abcd6cSAdrian Chadd for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 350345abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 3504c42a7b7eSSam Leffler bf->bf_daddr = DS2PHYS(dd, ds); 350545abcd6cSAdrian Chadd if (! 
ath_hal_split4ktrans(sc->sc_ah)) { 350645abcd6cSAdrian Chadd /* 350745abcd6cSAdrian Chadd * Merlin WAR: Skip descriptor addresses which 350845abcd6cSAdrian Chadd * cause 4KB boundary crossing along any point 350945abcd6cSAdrian Chadd * in the descriptor. 351045abcd6cSAdrian Chadd */ 351145abcd6cSAdrian Chadd if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 351245abcd6cSAdrian Chadd desc_len * ndesc)) { 351345abcd6cSAdrian Chadd /* Start at the next page */ 351445abcd6cSAdrian Chadd ds += 0x1000 - (bf->bf_daddr & 0xFFF); 351545abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 351645abcd6cSAdrian Chadd bf->bf_daddr = DS2PHYS(dd, ds); 351745abcd6cSAdrian Chadd } 351845abcd6cSAdrian Chadd } 3519c42a7b7eSSam Leffler error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3520c42a7b7eSSam Leffler &bf->bf_dmamap); 3521c42a7b7eSSam Leffler if (error != 0) { 3522c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s " 3523c42a7b7eSSam Leffler "buffer %u, error %u\n", dd->dd_name, i, error); 3524c42a7b7eSSam Leffler ath_descdma_cleanup(sc, dd, head); 3525c42a7b7eSSam Leffler return error; 3526c42a7b7eSSam Leffler } 35276edf1dc7SAdrian Chadd bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 35286b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(head, bf, bf_list); 3529c42a7b7eSSam Leffler } 3530c42a7b7eSSam Leffler return 0; 3531c42a7b7eSSam Leffler fail3: 3532c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3533c42a7b7eSSam Leffler fail2: 3534c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3535c42a7b7eSSam Leffler fail1: 3536c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3537c42a7b7eSSam Leffler fail0: 3538c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3539c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3540c42a7b7eSSam Leffler return error; 3541c42a7b7eSSam Leffler #undef DS2PHYS 354245abcd6cSAdrian Chadd #undef ATH_DESC_4KB_BOUND_CHECK 3543c42a7b7eSSam Leffler } 3544c42a7b7eSSam Leffler 3545c42a7b7eSSam Leffler static void 3546c42a7b7eSSam Leffler ath_descdma_cleanup(struct ath_softc *sc, 3547c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head) 3548c42a7b7eSSam Leffler { 3549c42a7b7eSSam Leffler struct ath_buf *bf; 3550c42a7b7eSSam Leffler struct ieee80211_node *ni; 3551c42a7b7eSSam Leffler 3552c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3553c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3554c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3555c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 3556c42a7b7eSSam Leffler 35576b349e5aSAdrian Chadd TAILQ_FOREACH(bf, head, bf_list) { 3558c42a7b7eSSam Leffler if (bf->bf_m) { 3559c42a7b7eSSam Leffler m_freem(bf->bf_m); 3560c42a7b7eSSam Leffler bf->bf_m = NULL; 3561c42a7b7eSSam Leffler } 3562c42a7b7eSSam Leffler if (bf->bf_dmamap != NULL) { 3563c42a7b7eSSam Leffler bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3564c42a7b7eSSam Leffler bf->bf_dmamap = NULL; 3565c42a7b7eSSam Leffler } 3566c42a7b7eSSam Leffler ni = bf->bf_node; 3567c42a7b7eSSam Leffler bf->bf_node = NULL; 3568c42a7b7eSSam Leffler if (ni != NULL) { 3569c42a7b7eSSam Leffler /* 3570c42a7b7eSSam Leffler * Reclaim node reference. 
3571c42a7b7eSSam Leffler */ 3572c42a7b7eSSam Leffler ieee80211_free_node(ni); 3573c42a7b7eSSam Leffler } 3574c42a7b7eSSam Leffler } 3575c42a7b7eSSam Leffler 35766b349e5aSAdrian Chadd TAILQ_INIT(head); 3577c42a7b7eSSam Leffler free(dd->dd_bufptr, M_ATHDEV); 3578c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 3579c42a7b7eSSam Leffler } 3580c42a7b7eSSam Leffler 3581c42a7b7eSSam Leffler static int 35825591b213SSam Leffler ath_desc_alloc(struct ath_softc *sc) 35835591b213SSam Leffler { 3584c42a7b7eSSam Leffler int error; 35855591b213SSam Leffler 3586c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3587e2d787faSSam Leffler "rx", ath_rxbuf, 1); 35885591b213SSam Leffler if (error != 0) 35895591b213SSam Leffler return error; 35905591b213SSam Leffler 3591c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3592e2d787faSSam Leffler "tx", ath_txbuf, ATH_TXDESC); 3593c42a7b7eSSam Leffler if (error != 0) { 3594c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 35955591b213SSam Leffler return error; 3596c42a7b7eSSam Leffler } 3597c42a7b7eSSam Leffler 3598c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3599b032f27cSSam Leffler "beacon", ATH_BCBUF, 1); 3600c42a7b7eSSam Leffler if (error != 0) { 3601c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3602c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3603c42a7b7eSSam Leffler return error; 3604c42a7b7eSSam Leffler } 36055591b213SSam Leffler return 0; 36065591b213SSam Leffler } 36075591b213SSam Leffler 36085591b213SSam Leffler static void 36095591b213SSam Leffler ath_desc_free(struct ath_softc *sc) 36105591b213SSam Leffler { 36115591b213SSam Leffler 3612c42a7b7eSSam Leffler if (sc->sc_bdma.dd_desc_len != 0) 3613c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3614c42a7b7eSSam Leffler if (sc->sc_txdma.dd_desc_len != 0) 3615c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3616c42a7b7eSSam Leffler if (sc->sc_rxdma.dd_desc_len != 0) 3617c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 36185591b213SSam Leffler } 36195591b213SSam Leffler 36205591b213SSam Leffler static struct ieee80211_node * 362138c208f8SSam Leffler ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 36225591b213SSam Leffler { 362338c208f8SSam Leffler struct ieee80211com *ic = vap->iv_ic; 3624c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 3625c42a7b7eSSam Leffler const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3626c42a7b7eSSam Leffler struct ath_node *an; 3627c42a7b7eSSam Leffler 3628c42a7b7eSSam Leffler an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3629c42a7b7eSSam Leffler if (an == NULL) { 3630c42a7b7eSSam Leffler /* XXX stat+msg */ 3631de5af704SSam Leffler return NULL; 36325591b213SSam Leffler } 3633c42a7b7eSSam Leffler ath_rate_node_init(sc, an); 36345591b213SSam Leffler 36353dd85b26SAdrian Chadd /* Setup the mutex - there's no associd yet so set the name to NULL */ 36363dd85b26SAdrian Chadd snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 36373dd85b26SAdrian Chadd device_get_nameunit(sc->sc_dev), an); 36383dd85b26SAdrian Chadd mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 36393dd85b26SAdrian Chadd 3640eb6f0de0SAdrian Chadd /* XXX setup ath_tid */ 3641eb6f0de0SAdrian Chadd ath_tx_tid_init(sc, an); 3642eb6f0de0SAdrian Chadd 3643c42a7b7eSSam Leffler DPRINTF(sc, 
ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3644c42a7b7eSSam Leffler return &an->an_node; 3645c42a7b7eSSam Leffler } 3646c42a7b7eSSam Leffler 36475591b213SSam Leffler static void 36484afa805eSAdrian Chadd ath_node_cleanup(struct ieee80211_node *ni) 36494afa805eSAdrian Chadd { 36504afa805eSAdrian Chadd struct ieee80211com *ic = ni->ni_ic; 36514afa805eSAdrian Chadd struct ath_softc *sc = ic->ic_ifp->if_softc; 36524afa805eSAdrian Chadd 36534afa805eSAdrian Chadd /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3654eb6f0de0SAdrian Chadd ath_tx_node_flush(sc, ATH_NODE(ni)); 36554afa805eSAdrian Chadd ath_rate_node_cleanup(sc, ATH_NODE(ni)); 36564afa805eSAdrian Chadd sc->sc_node_cleanup(ni); 36574afa805eSAdrian Chadd } 36584afa805eSAdrian Chadd 36594afa805eSAdrian Chadd static void 3660c42a7b7eSSam Leffler ath_node_free(struct ieee80211_node *ni) 36615591b213SSam Leffler { 3662c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 3663c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 36641e774079SSam Leffler 3665c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 36663dd85b26SAdrian Chadd mtx_destroy(&ATH_NODE(ni)->an_mtx); 3667c42a7b7eSSam Leffler sc->sc_node_free(ni); 36685591b213SSam Leffler } 36695591b213SSam Leffler 367068e8e04eSSam Leffler static void 367168e8e04eSSam Leffler ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 367268e8e04eSSam Leffler { 367368e8e04eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 367468e8e04eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 367568e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 367668e8e04eSSam Leffler 3677b032f27cSSam Leffler *rssi = ic->ic_node_getrssi(ni); 367859efa8b5SSam Leffler if (ni->ni_chan != IEEE80211_CHAN_ANYC) 367959efa8b5SSam Leffler *noise = ath_hal_getchannoise(ah, ni->ni_chan); 368059efa8b5SSam Leffler else 368168e8e04eSSam Leffler *noise = -95; /* nominally correct */ 368268e8e04eSSam Leffler } 368368e8e04eSSam Leffler 36845591b213SSam Leffler static int 36855591b213SSam Leffler ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 36865591b213SSam Leffler { 36875591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 36885591b213SSam Leffler int error; 36895591b213SSam Leffler struct mbuf *m; 36905591b213SSam Leffler struct ath_desc *ds; 36915591b213SSam Leffler 36925591b213SSam Leffler m = bf->bf_m; 36935591b213SSam Leffler if (m == NULL) { 36945591b213SSam Leffler /* 36955591b213SSam Leffler * NB: by assigning a page to the rx dma buffer we 36965591b213SSam Leffler * implicitly satisfy the Atheros requirement that 36975591b213SSam Leffler * this buffer be cache-line-aligned and sized to be 36985591b213SSam Leffler * multiple of the cache line size. Not doing this 36995591b213SSam Leffler * causes weird stuff to happen (for the 5210 at least). 
37005591b213SSam Leffler */ 37015591b213SSam Leffler m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 37025591b213SSam Leffler if (m == NULL) { 3703c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3704c42a7b7eSSam Leffler "%s: no mbuf/cluster\n", __func__); 37055591b213SSam Leffler sc->sc_stats.ast_rx_nombuf++; 37065591b213SSam Leffler return ENOMEM; 37075591b213SSam Leffler } 37085591b213SSam Leffler m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 37095591b213SSam Leffler 3710f9e6219bSSam Leffler error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3711c42a7b7eSSam Leffler bf->bf_dmamap, m, 3712f9e6219bSSam Leffler bf->bf_segs, &bf->bf_nseg, 37135591b213SSam Leffler BUS_DMA_NOWAIT); 37145591b213SSam Leffler if (error != 0) { 3715c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 3716f9e6219bSSam Leffler "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3717c42a7b7eSSam Leffler __func__, error); 37185591b213SSam Leffler sc->sc_stats.ast_rx_busdma++; 3719b2792ff6SSam Leffler m_freem(m); 37205591b213SSam Leffler return error; 37215591b213SSam Leffler } 3722d77367bfSSam Leffler KASSERT(bf->bf_nseg == 1, 3723d77367bfSSam Leffler ("multi-segment packet; nseg %u", bf->bf_nseg)); 3724b2792ff6SSam Leffler bf->bf_m = m; 37255591b213SSam Leffler } 37265591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 37275591b213SSam Leffler 372804e22a02SSam Leffler /* 372904e22a02SSam Leffler * Setup descriptors. For receive we always terminate 373004e22a02SSam Leffler * the descriptor list with a self-linked entry so we'll 373104e22a02SSam Leffler * not get overrun under high load (as can happen with a 3732c42a7b7eSSam Leffler * 5212 when ANI processing enables PHY error frames). 373304e22a02SSam Leffler * 373404e22a02SSam Leffler * To insure the last descriptor is self-linked we create 373504e22a02SSam Leffler * each descriptor as self-linked and add it to the end. As 373604e22a02SSam Leffler * each additional descriptor is added the previous self-linked 373704e22a02SSam Leffler * entry is ``fixed'' naturally. This should be safe even 373804e22a02SSam Leffler * if DMA is happening. When processing RX interrupts we 373904e22a02SSam Leffler * never remove/process the last, self-linked, entry on the 374004e22a02SSam Leffler * descriptor list. This insures the hardware always has 374104e22a02SSam Leffler * someplace to write a new frame. 374204e22a02SSam Leffler */ 37438a2a6beeSAdrian Chadd /* 37448a2a6beeSAdrian Chadd * 11N: we can no longer afford to self link the last descriptor. 37458a2a6beeSAdrian Chadd * MAC acknowledges BA status as long as it copies frames to host 37468a2a6beeSAdrian Chadd * buffer (or rx fifo). This can incorrectly acknowledge packets 37478a2a6beeSAdrian Chadd * to a sender if last desc is self-linked. 
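 * Hence the sc_rxslink flag below: parts that need it keep the
 * self-linked tail to avoid rx overruns, while 11n parts terminate
 * the list and the end-of-list case is handled in ath_rx_proc()
 * instead (see the ast_rx_hitqueueend accounting there).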
37488a2a6beeSAdrian Chadd */ 37495591b213SSam Leffler ds = bf->bf_desc; 37508a2a6beeSAdrian Chadd if (sc->sc_rxslink) 375104e22a02SSam Leffler ds->ds_link = bf->bf_daddr; /* link to self */ 37528a2a6beeSAdrian Chadd else 37538a2a6beeSAdrian Chadd ds->ds_link = 0; /* terminate the list */ 37545591b213SSam Leffler ds->ds_data = bf->bf_segs[0].ds_addr; 37555591b213SSam Leffler ath_hal_setuprxdesc(ah, ds 37565591b213SSam Leffler , m->m_len /* buffer size */ 37575591b213SSam Leffler , 0 37585591b213SSam Leffler ); 37595591b213SSam Leffler 37605591b213SSam Leffler if (sc->sc_rxlink != NULL) 37615591b213SSam Leffler *sc->sc_rxlink = bf->bf_daddr; 37625591b213SSam Leffler sc->sc_rxlink = &ds->ds_link; 37635591b213SSam Leffler return 0; 37645591b213SSam Leffler } 37655591b213SSam Leffler 3766c42a7b7eSSam Leffler /* 376703ed599aSSam Leffler * Extend 15-bit time stamp from rx descriptor to 37687b0c77ecSSam Leffler * a full 64-bit TSF using the specified TSF. 376903ed599aSSam Leffler */ 377003ed599aSSam Leffler static __inline u_int64_t 3771fc4de9b7SAdrian Chadd ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf) 377203ed599aSSam Leffler { 377303ed599aSSam Leffler if ((tsf & 0x7fff) < rstamp) 377403ed599aSSam Leffler tsf -= 0x8000; 3775fc4de9b7SAdrian Chadd 377603ed599aSSam Leffler return ((tsf &~ 0x7fff) | rstamp); 377703ed599aSSam Leffler } 377803ed599aSSam Leffler 377903ed599aSSam Leffler /* 3780fc4de9b7SAdrian Chadd * Extend 32-bit time stamp from rx descriptor to 3781fc4de9b7SAdrian Chadd * a full 64-bit TSF using the specified TSF. 3782fc4de9b7SAdrian Chadd */ 3783fc4de9b7SAdrian Chadd static __inline u_int64_t 3784fc4de9b7SAdrian Chadd ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf) 3785fc4de9b7SAdrian Chadd { 3786fc4de9b7SAdrian Chadd u_int32_t tsf_low = tsf & 0xffffffff; 3787fc4de9b7SAdrian Chadd u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp; 3788fc4de9b7SAdrian Chadd 3789fc4de9b7SAdrian Chadd if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000)) 3790fc4de9b7SAdrian Chadd tsf64 -= 0x100000000ULL; 3791fc4de9b7SAdrian Chadd 3792fc4de9b7SAdrian Chadd if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000)) 3793fc4de9b7SAdrian Chadd tsf64 += 0x100000000ULL; 3794fc4de9b7SAdrian Chadd 3795fc4de9b7SAdrian Chadd return tsf64; 3796fc4de9b7SAdrian Chadd } 3797fc4de9b7SAdrian Chadd 3798fc4de9b7SAdrian Chadd /* 3799fc4de9b7SAdrian Chadd * Extend the TSF from the RX descriptor to a full 64 bit TSF. 3800fc4de9b7SAdrian Chadd * Earlier hardware versions only wrote the low 15 bits of the 3801fc4de9b7SAdrian Chadd * TSF into the RX descriptor; later versions (AR5416 and up) 3802fc4de9b7SAdrian Chadd * include the 32 bit TSF value. 3803fc4de9b7SAdrian Chadd */ 3804fc4de9b7SAdrian Chadd static __inline u_int64_t 3805fc4de9b7SAdrian Chadd ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf) 3806fc4de9b7SAdrian Chadd { 3807fc4de9b7SAdrian Chadd if (sc->sc_rxtsf32) 3808fc4de9b7SAdrian Chadd return ath_extend_tsf32(rstamp, tsf); 3809fc4de9b7SAdrian Chadd else 3810fc4de9b7SAdrian Chadd return ath_extend_tsf15(rstamp, tsf); 3811fc4de9b7SAdrian Chadd } 3812fc4de9b7SAdrian Chadd 3813fc4de9b7SAdrian Chadd /* 3814c42a7b7eSSam Leffler * Intercept management frames to collect beacon rssi data 3815c42a7b7eSSam Leffler * and to do ibss merges. 
3816c42a7b7eSSam Leffler */ 3817c42a7b7eSSam Leffler static void 3818b032f27cSSam Leffler ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 38195463c4a4SSam Leffler int subtype, int rssi, int nf) 3820c42a7b7eSSam Leffler { 3821b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 3822b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 3823c42a7b7eSSam Leffler 3824c42a7b7eSSam Leffler /* 3825c42a7b7eSSam Leffler * Call up first so subsequent work can use information 3826c42a7b7eSSam Leffler * potentially stored in the node (e.g. for ibss merge). 3827c42a7b7eSSam Leffler */ 38285463c4a4SSam Leffler ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf); 3829c42a7b7eSSam Leffler switch (subtype) { 3830c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_BEACON: 3831c42a7b7eSSam Leffler /* update rssi statistics for use by the hal */ 3832ffa2cab6SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); 383380d939bfSSam Leffler if (sc->sc_syncbeacon && 3834b032f27cSSam Leffler ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { 383580d939bfSSam Leffler /* 383680d939bfSSam Leffler * Resync beacon timers using the tsf of the beacon 383780d939bfSSam Leffler * frame we just received. 383880d939bfSSam Leffler */ 3839b032f27cSSam Leffler ath_beacon_config(sc, vap); 384080d939bfSSam Leffler } 3841c42a7b7eSSam Leffler /* fall thru... */ 3842c42a7b7eSSam Leffler case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3843b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS && 3844b032f27cSSam Leffler vap->iv_state == IEEE80211_S_RUN) { 38457041d50cSBernhard Schmidt uint32_t rstamp = sc->sc_lastrs->rs_tstamp; 3846fc4de9b7SAdrian Chadd uint64_t tsf = ath_extend_tsf(sc, rstamp, 38477b0c77ecSSam Leffler ath_hal_gettsf64(sc->sc_ah)); 3848c42a7b7eSSam Leffler /* 3849c42a7b7eSSam Leffler * Handle ibss merge as needed; check the tsf on the 3850c42a7b7eSSam Leffler * frame before attempting the merge. The 802.11 spec 3851c42a7b7eSSam Leffler * says the station should change its bssid to match 3852c42a7b7eSSam Leffler * the oldest station with the same ssid, where oldest 3853f818612bSSam Leffler * is determined by the tsf. Note that hardware 3854f818612bSSam Leffler * reconfiguration happens through callback to 385503ed599aSSam Leffler * ath_newstate as the state machine will go from 385603ed599aSSam Leffler * RUN -> RUN when this happens. 3857c42a7b7eSSam Leffler */ 385803ed599aSSam Leffler if (le64toh(ni->ni_tstamp.tsf) >= tsf) { 385903ed599aSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 386033d7d80cSTai-hwa Liang "ibss merge, rstamp %u tsf %ju " 386133d7d80cSTai-hwa Liang "tstamp %ju\n", rstamp, (uintmax_t)tsf, 386233d7d80cSTai-hwa Liang (uintmax_t)ni->ni_tstamp.tsf); 3863641b4d0bSSam Leffler (void) ieee80211_ibss_merge(ni); 3864c42a7b7eSSam Leffler } 386503ed599aSSam Leffler } 3866c42a7b7eSSam Leffler break; 3867c42a7b7eSSam Leffler } 3868c42a7b7eSSam Leffler } 3869c42a7b7eSSam Leffler 3870c42a7b7eSSam Leffler /* 3871c42a7b7eSSam Leffler * Set the default antenna.
3872c42a7b7eSSam Leffler */ 3873c42a7b7eSSam Leffler static void 3874c42a7b7eSSam Leffler ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3875c42a7b7eSSam Leffler { 3876c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3877c42a7b7eSSam Leffler 3878c42a7b7eSSam Leffler /* XXX block beacon interrupts */ 3879c42a7b7eSSam Leffler ath_hal_setdefantenna(ah, antenna); 3880c42a7b7eSSam Leffler if (sc->sc_defant != antenna) 3881c42a7b7eSSam Leffler sc->sc_stats.ast_ant_defswitch++; 3882c42a7b7eSSam Leffler sc->sc_defant = antenna; 3883c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 3884c42a7b7eSSam Leffler } 3885c42a7b7eSSam Leffler 38865463c4a4SSam Leffler static void 3887b032f27cSSam Leffler ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 388865f9edeeSSam Leffler const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 38897b0c77ecSSam Leffler { 3890e387d629SSam Leffler #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3891e387d629SSam Leffler #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3892e387d629SSam Leffler #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 389346d4d74cSSam Leffler #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3894b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 389546d4d74cSSam Leffler const HAL_RATE_TABLE *rt; 389646d4d74cSSam Leffler uint8_t rix; 38977b0c77ecSSam Leffler 389846d4d74cSSam Leffler rt = sc->sc_currates; 389946d4d74cSSam Leffler KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 390046d4d74cSSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate]; 390168e8e04eSSam Leffler sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 39027b0c77ecSSam Leffler sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 390346d4d74cSSam Leffler #ifdef AH_SUPPORT_AR5416 3904e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 390546d4d74cSSam Leffler if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 390659efa8b5SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 390759efa8b5SSam Leffler 3908e387d629SSam Leffler if ((rs->rs_flags & HAL_RX_2040) == 0) 3909e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 391059efa8b5SSam Leffler else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 3911e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3912e387d629SSam Leffler else 3913e387d629SSam Leffler sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 391468e8e04eSSam Leffler if ((rs->rs_flags & HAL_RX_GI) == 0) 3915e387d629SSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 391668e8e04eSSam Leffler } 391768e8e04eSSam Leffler #endif 3918fc4de9b7SAdrian Chadd sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 391965f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 39207b0c77ecSSam Leffler sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 39217b0c77ecSSam Leffler /* XXX propagate other error flags from descriptor */ 39227b0c77ecSSam Leffler sc->sc_rx_th.wr_antnoise = nf; 39235463c4a4SSam Leffler sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 392465f9edeeSSam Leffler sc->sc_rx_th.wr_antenna = rs->rs_antenna; 392546d4d74cSSam Leffler #undef CHAN_HT 3926e387d629SSam Leffler #undef CHAN_HT20 3927e387d629SSam Leffler #undef CHAN_HT40U 3928e387d629SSam Leffler #undef CHAN_HT40D 39297b0c77ecSSam Leffler } 39307b0c77ecSSam Leffler 39315591b213SSam Leffler static void 3932b032f27cSSam Leffler ath_handle_micerror(struct ieee80211com *ic, 3933b032f27cSSam Leffler struct ieee80211_frame *wh, int keyix) 3934b032f27cSSam Leffler { 3935b032f27cSSam Leffler struct ieee80211_node *ni; 3936b032f27cSSam Leffler 
3937b032f27cSSam Leffler /* XXX recheck MIC to deal w/ chips that lie */ 3938b032f27cSSam Leffler /* XXX discard MIC errors on !data frames */ 3939b032f27cSSam Leffler ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 3940b032f27cSSam Leffler if (ni != NULL) { 3941b032f27cSSam Leffler ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 3942b032f27cSSam Leffler ieee80211_free_node(ni); 3943b032f27cSSam Leffler } 3944b032f27cSSam Leffler } 3945b032f27cSSam Leffler 394696ff485dSAdrian Chadd /* 394796ff485dSAdrian Chadd * Only run the RX proc if it's not already running. 394896ff485dSAdrian Chadd * Since this may get run as part of the reset/flush path, 394996ff485dSAdrian Chadd * the task can't clash with an existing, running tasklet. 395096ff485dSAdrian Chadd */ 3951b032f27cSSam Leffler static void 395296ff485dSAdrian Chadd ath_rx_tasklet(void *arg, int npending) 395396ff485dSAdrian Chadd { 395496ff485dSAdrian Chadd struct ath_softc *sc = arg; 395596ff485dSAdrian Chadd 395696ff485dSAdrian Chadd CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 395796ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 3958ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3959ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 3960ef27340cSAdrian Chadd device_printf(sc->sc_dev, 3961ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3962ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3963ef27340cSAdrian Chadd return; 3964ef27340cSAdrian Chadd } 3965ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 396696ff485dSAdrian Chadd ath_rx_proc(sc, 1); 396796ff485dSAdrian Chadd } 396896ff485dSAdrian Chadd 396996ff485dSAdrian Chadd static void 397096ff485dSAdrian Chadd ath_rx_proc(struct ath_softc *sc, int resched) 39715591b213SSam Leffler { 39728cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 3973c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 3974c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 39755591b213SSam Leffler struct ath_buf *bf; 3976fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 3977b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 39785591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 39795591b213SSam Leffler struct ath_desc *ds; 398065f9edeeSSam Leffler struct ath_rx_status *rs; 39815591b213SSam Leffler struct mbuf *m; 39820a915fadSSam Leffler struct ieee80211_node *ni; 3983d7736e13SSam Leffler int len, type, ngood; 39845591b213SSam Leffler HAL_STATUS status; 39857b0c77ecSSam Leffler int16_t nf; 3986*06fc4a10SAdrian Chadd u_int64_t tsf, rstamp; 39878f939e79SAdrian Chadd int npkts = 0; 39885591b213SSam Leffler 3989ef27340cSAdrian Chadd /* XXX we must not hold the ATH_LOCK here */ 3990ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 3991ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 3992ef27340cSAdrian Chadd 3993ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3994ef27340cSAdrian Chadd sc->sc_rxproc_cnt++; 3995ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3996ef27340cSAdrian Chadd 399796ff485dSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 3998d7736e13SSam Leffler ngood = 0; 399959efa8b5SSam Leffler nf = ath_hal_getchannoise(ah, sc->sc_curchan); 400084784be1SSam Leffler sc->sc_stats.ast_rx_noise = nf; 40017b0c77ecSSam Leffler tsf = ath_hal_gettsf64(ah); 40025591b213SSam Leffler do { 40036b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 40048a2a6beeSAdrian Chadd if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4005c42a7b7eSSam Leffler if_printf(ifp, 
"%s: no buffer!\n", __func__); 40065591b213SSam Leffler break; 40078a2a6beeSAdrian Chadd } else if (bf == NULL) { 40088a2a6beeSAdrian Chadd /* 40098a2a6beeSAdrian Chadd * End of List: 40108a2a6beeSAdrian Chadd * this can happen for non-self-linked RX chains 40118a2a6beeSAdrian Chadd */ 40128a2a6beeSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 40138a2a6beeSAdrian Chadd break; 40145591b213SSam Leffler } 4015b2792ff6SSam Leffler m = bf->bf_m; 4016b2792ff6SSam Leffler if (m == NULL) { /* NB: shouldn't happen */ 4017b2792ff6SSam Leffler /* 4018b2792ff6SSam Leffler * If mbuf allocation failed previously there 4019b2792ff6SSam Leffler * will be no mbuf; try again to re-populate it. 4020b2792ff6SSam Leffler */ 4021b2792ff6SSam Leffler /* XXX make debug msg */ 4022b2792ff6SSam Leffler if_printf(ifp, "%s: no mbuf!\n", __func__); 40236b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4024b2792ff6SSam Leffler goto rx_next; 4025b2792ff6SSam Leffler } 402604e22a02SSam Leffler ds = bf->bf_desc; 402704e22a02SSam Leffler if (ds->ds_link == bf->bf_daddr) { 402804e22a02SSam Leffler /* NB: never process the self-linked entry at the end */ 4029f77057dbSAdrian Chadd sc->sc_stats.ast_rx_hitqueueend++; 403004e22a02SSam Leffler break; 403104e22a02SSam Leffler } 40328cec0ab9SSam Leffler /* XXX sync descriptor memory */ 40338cec0ab9SSam Leffler /* 40348cec0ab9SSam Leffler * Must provide the virtual address of the current 40358cec0ab9SSam Leffler * descriptor, the physical address, and the virtual 40368cec0ab9SSam Leffler * address of the next descriptor in the h/w chain. 40378cec0ab9SSam Leffler * This allows the HAL to look ahead to see if the 40388cec0ab9SSam Leffler * hardware is done with a descriptor by checking the 40398cec0ab9SSam Leffler * done bit in the following descriptor and the address 40408cec0ab9SSam Leffler * of the current descriptor the DMA engine is working 40418cec0ab9SSam Leffler * on. All this is necessary because of our use of 40428cec0ab9SSam Leffler * a self-linked list to avoid rx overruns. 40438cec0ab9SSam Leffler */ 404465f9edeeSSam Leffler rs = &bf->bf_status.ds_rxstat; 40458cec0ab9SSam Leffler status = ath_hal_rxprocdesc(ah, ds, 404665f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4047a585a9a1SSam Leffler #ifdef ATH_DEBUG 4048c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 40496902009eSSam Leffler ath_printrxbuf(sc, bf, 0, status == HAL_OK); 40505591b213SSam Leffler #endif 40515591b213SSam Leffler if (status == HAL_EINPROGRESS) 40525591b213SSam Leffler break; 40536b349e5aSAdrian Chadd 40546b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 40558f939e79SAdrian Chadd npkts++; 4056f9aa1d90SAdrian Chadd 4057*06fc4a10SAdrian Chadd /* 4058*06fc4a10SAdrian Chadd * Calculate the correct 64 bit TSF given 4059*06fc4a10SAdrian Chadd * the TSF64 register value and rs_tstamp. 
4060*06fc4a10SAdrian Chadd */ 4061*06fc4a10SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 4062*06fc4a10SAdrian Chadd 4063f9aa1d90SAdrian Chadd /* These aren't specifically errors */ 40646e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4065f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_GI) 4066f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_halfgi++; 4067f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_2040) 4068f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_2040++; 4069f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4070f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_pre_crc_err++; 4071f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4072f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_post_crc_err++; 4073f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4074f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_decrypt_busy_err++; 4075f9aa1d90SAdrian Chadd if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4076f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_hi_rx_chain++; 40776e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4078f9aa1d90SAdrian Chadd 407968e8e04eSSam Leffler if (rs->rs_status != 0) { 408065f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_CRC) 40815591b213SSam Leffler sc->sc_stats.ast_rx_crcerr++; 408265f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_FIFO) 40835591b213SSam Leffler sc->sc_stats.ast_rx_fifoerr++; 408465f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_PHY) { 40855591b213SSam Leffler sc->sc_stats.ast_rx_phyerr++; 408648237774SAdrian Chadd /* Process DFS radar events */ 4087373815efSAdrian Chadd if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4088373815efSAdrian Chadd (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4089373815efSAdrian Chadd /* Since we're touching the frame data, sync it */ 4090373815efSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, 4091373815efSAdrian Chadd bf->bf_dmamap, 4092373815efSAdrian Chadd BUS_DMASYNC_POSTREAD); 4093373815efSAdrian Chadd /* Now pass it to the radar processing code */ 4094*06fc4a10SAdrian Chadd ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4095373815efSAdrian Chadd } 409648237774SAdrian Chadd 4097f9aa1d90SAdrian Chadd /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 4098f9aa1d90SAdrian Chadd if (rs->rs_phyerr < 64) 4099f9aa1d90SAdrian Chadd sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 410068e8e04eSSam Leffler goto rx_error; /* NB: don't count in ierrors */ 4101c42a7b7eSSam Leffler } 410265f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_DECRYPT) { 410385643802SSam Leffler /* 4104c42a7b7eSSam Leffler * Decrypt error. If the error occurred 4105c42a7b7eSSam Leffler * because there was no hardware key, then 4106c42a7b7eSSam Leffler * let the frame through so the upper layers 4107c42a7b7eSSam Leffler * can process it. This is necessary for 5210 4108c42a7b7eSSam Leffler * parts which have no way to setup a ``clear'' 4109c42a7b7eSSam Leffler * key cache entry. 4110c42a7b7eSSam Leffler * 4111c42a7b7eSSam Leffler * XXX do key cache faulting 411285643802SSam Leffler */ 411365f9edeeSSam Leffler if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4114c42a7b7eSSam Leffler goto rx_accept; 4115c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badcrypt++; 41165591b213SSam Leffler } 411765f9edeeSSam Leffler if (rs->rs_status & HAL_RXERR_MIC) { 4118c42a7b7eSSam Leffler sc->sc_stats.ast_rx_badmic++; 4119c42a7b7eSSam Leffler /* 4120c42a7b7eSSam Leffler * Do minimal work required to hand off 41215463c4a4SSam Leffler * the 802.11 header for notification. 
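 * (The rs_keyix-32 adjustment below appears to compensate for parts
 * with split MIC key cache slots, where the MIC key sits 32 entries
 * above the base key, so the base index is what gets reported up to
 * net80211.)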
4122c42a7b7eSSam Leffler */ 4123c42a7b7eSSam Leffler /* XXX frag's and qos frames */ 412465f9edeeSSam Leffler len = rs->rs_datalen; 4125c42a7b7eSSam Leffler if (len >= sizeof (struct ieee80211_frame)) { 4126c42a7b7eSSam Leffler bus_dmamap_sync(sc->sc_dmat, 4127c42a7b7eSSam Leffler bf->bf_dmamap, 4128c42a7b7eSSam Leffler BUS_DMASYNC_POSTREAD); 4129b032f27cSSam Leffler ath_handle_micerror(ic, 4130c42a7b7eSSam Leffler mtod(m, struct ieee80211_frame *), 41310ab4040aSSam Leffler sc->sc_splitmic ? 4132b032f27cSSam Leffler rs->rs_keyix-32 : rs->rs_keyix); 4133c42a7b7eSSam Leffler } 4134c42a7b7eSSam Leffler } 4135c42a7b7eSSam Leffler ifp->if_ierrors++; 413668e8e04eSSam Leffler rx_error: 413768e8e04eSSam Leffler /* 413868e8e04eSSam Leffler * Cleanup any pending partial frame. 413968e8e04eSSam Leffler */ 414068e8e04eSSam Leffler if (sc->sc_rxpending != NULL) { 414168e8e04eSSam Leffler m_freem(sc->sc_rxpending); 414268e8e04eSSam Leffler sc->sc_rxpending = NULL; 414368e8e04eSSam Leffler } 4144c42a7b7eSSam Leffler /* 41457b0c77ecSSam Leffler * When a tap is present pass error frames 41467b0c77ecSSam Leffler * that have been requested. By default we 41477b0c77ecSSam Leffler * pass decrypt+mic errors but others may be 41487b0c77ecSSam Leffler * interesting (e.g. crc). 4149c42a7b7eSSam Leffler */ 41505463c4a4SSam Leffler if (ieee80211_radiotap_active(ic) && 415165f9edeeSSam Leffler (rs->rs_status & sc->sc_monpass)) { 41527b0c77ecSSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 41537b0c77ecSSam Leffler BUS_DMASYNC_POSTREAD); 41547b0c77ecSSam Leffler /* NB: bpf needs the mbuf length setup */ 415565f9edeeSSam Leffler len = rs->rs_datalen; 41567b0c77ecSSam Leffler m->m_pkthdr.len = m->m_len = len; 4157dcfd99a7SAdrian Chadd bf->bf_m = NULL; 4158*06fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf); 41595463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m); 4160dcfd99a7SAdrian Chadd m_freem(m); 41617b0c77ecSSam Leffler } 41627b0c77ecSSam Leffler /* XXX pass MIC errors up for s/w recalculation */ 41635591b213SSam Leffler goto rx_next; 41645591b213SSam Leffler } 4165c42a7b7eSSam Leffler rx_accept: 4166c42a7b7eSSam Leffler /* 4167c42a7b7eSSam Leffler * Sync and unmap the frame. At this point we're 4168c42a7b7eSSam Leffler * committed to passing the mbuf somewhere so clear 4169c66c48cbSSam Leffler * bf_m; this means a new mbuf must be allocated 4170c42a7b7eSSam Leffler * when the rx descriptor is setup again to receive 4171c42a7b7eSSam Leffler * another frame. 4172c42a7b7eSSam Leffler */ 41735591b213SSam Leffler bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 41745591b213SSam Leffler BUS_DMASYNC_POSTREAD); 41755591b213SSam Leffler bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 41765591b213SSam Leffler bf->bf_m = NULL; 4177c42a7b7eSSam Leffler 417865f9edeeSSam Leffler len = rs->rs_datalen; 417968e8e04eSSam Leffler m->m_len = len; 418068e8e04eSSam Leffler 418168e8e04eSSam Leffler if (rs->rs_more) { 418268e8e04eSSam Leffler /* 418368e8e04eSSam Leffler * Frame spans multiple descriptors; save 418468e8e04eSSam Leffler * it for the next completed descriptor, it 418568e8e04eSSam Leffler * will be used to construct a jumbogram.
418668e8e04eSSam Leffler */ 418768e8e04eSSam Leffler if (sc->sc_rxpending != NULL) { 418868e8e04eSSam Leffler /* NB: max frame size is currently 2 clusters */ 418968e8e04eSSam Leffler sc->sc_stats.ast_rx_toobig++; 419068e8e04eSSam Leffler m_freem(sc->sc_rxpending); 419168e8e04eSSam Leffler } 419268e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp; 419368e8e04eSSam Leffler m->m_pkthdr.len = len; 419468e8e04eSSam Leffler sc->sc_rxpending = m; 419568e8e04eSSam Leffler goto rx_next; 419668e8e04eSSam Leffler } else if (sc->sc_rxpending != NULL) { 419768e8e04eSSam Leffler /* 419868e8e04eSSam Leffler * This is the second part of a jumbogram, 419968e8e04eSSam Leffler * chain it to the first mbuf, adjust the 420068e8e04eSSam Leffler * frame length, and clear the rxpending state. 420168e8e04eSSam Leffler */ 420268e8e04eSSam Leffler sc->sc_rxpending->m_next = m; 420368e8e04eSSam Leffler sc->sc_rxpending->m_pkthdr.len += len; 420468e8e04eSSam Leffler m = sc->sc_rxpending; 420568e8e04eSSam Leffler sc->sc_rxpending = NULL; 420668e8e04eSSam Leffler } else { 420768e8e04eSSam Leffler /* 420868e8e04eSSam Leffler * Normal single-descriptor receive; setup 420968e8e04eSSam Leffler * the rcvif and packet length. 421068e8e04eSSam Leffler */ 421168e8e04eSSam Leffler m->m_pkthdr.rcvif = ifp; 421268e8e04eSSam Leffler m->m_pkthdr.len = len; 421368e8e04eSSam Leffler } 421473454c73SSam Leffler 4215197d53c5SAdrian Chadd /* 4216197d53c5SAdrian Chadd * Validate rs->rs_antenna. 4217197d53c5SAdrian Chadd * 4218197d53c5SAdrian Chadd * Some users w/ AR9285 NICs have reported crashes 4219197d53c5SAdrian Chadd * here because rs_antenna field is bogusly large. 4220197d53c5SAdrian Chadd * Let's enforce the maximum antenna limit of 8 4221197d53c5SAdrian Chadd * (and it shouldn't be hard coded, but that's a 4222197d53c5SAdrian Chadd * separate problem) and if there's an issue, print 4223197d53c5SAdrian Chadd * out an error and adjust rs_antenna to something 4224197d53c5SAdrian Chadd * sensible. 4225197d53c5SAdrian Chadd * 4226197d53c5SAdrian Chadd * This code should be removed once the actual 4227197d53c5SAdrian Chadd * root cause of the issue has been identified. 4228197d53c5SAdrian Chadd * For example, it may be that the rs_antenna 4229197d53c5SAdrian Chadd * field is only valid for the last frame of 4230197d53c5SAdrian Chadd * an aggregate and it just happens that it is 4231197d53c5SAdrian Chadd * "mostly" right. (This is a general statement - 4232197d53c5SAdrian Chadd * the majority of the statistics are only valid 4233197d53c5SAdrian Chadd * for the last frame in an aggregate.) 4234197d53c5SAdrian Chadd */ 4235197d53c5SAdrian Chadd if (rs->rs_antenna > 7) { 4236197d53c5SAdrian Chadd device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", 4237197d53c5SAdrian Chadd __func__, rs->rs_antenna); 4238197d53c5SAdrian Chadd #ifdef ATH_DEBUG 4239197d53c5SAdrian Chadd ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4240197d53c5SAdrian Chadd #endif /* ATH_DEBUG */ 4241197d53c5SAdrian Chadd rs->rs_antenna = 0; /* XXX better than nothing */ 4242197d53c5SAdrian Chadd } 4243197d53c5SAdrian Chadd 4244b032f27cSSam Leffler ifp->if_ipackets++; 424565f9edeeSSam Leffler sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 4246c42a7b7eSSam Leffler 42475463c4a4SSam Leffler /* 42485463c4a4SSam Leffler * Populate the rx status block. When there are bpf 42495463c4a4SSam Leffler * listeners we do the additional work to provide 42505463c4a4SSam Leffler * complete status. Otherwise we fill in only the 42515463c4a4SSam Leffler * material required by ieee80211_input.
Note that 42525463c4a4SSam Leffler * noise setting is filled in above. 42535463c4a4SSam Leffler */ 42545463c4a4SSam Leffler if (ieee80211_radiotap_active(ic)) 4255*06fc4a10SAdrian Chadd ath_rx_tap(ifp, m, rs, rstamp, nf); 42560a915fadSSam Leffler 42575591b213SSam Leffler /* 4258c42a7b7eSSam Leffler * From this point on we assume the frame is at least 4259c42a7b7eSSam Leffler * as large as ieee80211_frame_min; verify that. 42605591b213SSam Leffler */ 4261c42a7b7eSSam Leffler if (len < IEEE80211_MIN_LEN) { 42625463c4a4SSam Leffler if (!ieee80211_radiotap_active(ic)) { 42635463c4a4SSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 42645463c4a4SSam Leffler "%s: short packet %d\n", __func__, len); 4265c42a7b7eSSam Leffler sc->sc_stats.ast_rx_tooshort++; 42665463c4a4SSam Leffler } else { 42675463c4a4SSam Leffler /* NB: in particular this captures ack's */ 42685463c4a4SSam Leffler ieee80211_radiotap_rx_all(ic, m); 42695463c4a4SSam Leffler } 4270c42a7b7eSSam Leffler m_freem(m); 4271c42a7b7eSSam Leffler goto rx_next; 42725591b213SSam Leffler } 42730a915fadSSam Leffler 4274c42a7b7eSSam Leffler if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 427546d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 427646d4d74cSSam Leffler uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 427746d4d74cSSam Leffler 427868e8e04eSSam Leffler ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 427946d4d74cSSam Leffler sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4280c42a7b7eSSam Leffler } 4281c42a7b7eSSam Leffler 4282c42a7b7eSSam Leffler m_adj(m, -IEEE80211_CRC_LEN); 4283de5af704SSam Leffler 4284de5af704SSam Leffler /* 4285c42a7b7eSSam Leffler * Locate the node for sender, track state, and then 4286c42a7b7eSSam Leffler * pass the (referenced) node up to the 802.11 layer 4287c42a7b7eSSam Leffler * for its use. 4288c42a7b7eSSam Leffler */ 4289c1225b52SSam Leffler ni = ieee80211_find_rxnode_withkey(ic, 4290c1225b52SSam Leffler mtod(m, const struct ieee80211_frame_min *), 429165f9edeeSSam Leffler rs->rs_keyix == HAL_RXKEYIX_INVALID ? 429265f9edeeSSam Leffler IEEE80211_KEYIX_NONE : rs->rs_keyix); 42937041d50cSBernhard Schmidt sc->sc_lastrs = rs; 4294a07e9ddbSAdrian Chadd 42956e0f1168SAdrian Chadd #ifdef AH_SUPPORT_AR5416 4296a07e9ddbSAdrian Chadd if (rs->rs_isaggr) 4297a07e9ddbSAdrian Chadd sc->sc_stats.ast_rx_agg++; 42986e0f1168SAdrian Chadd #endif /* AH_SUPPORT_AR5416 */ 4299a07e9ddbSAdrian Chadd 4300a07e9ddbSAdrian Chadd if (ni != NULL) { 4301b032f27cSSam Leffler /* 4302e57539afSAdrian Chadd * Only punt packets for ampdu reorder processing for 4303e57539afSAdrian Chadd * 11n nodes; net80211 enforces that M_AMPDU is only 4304e57539afSAdrian Chadd * set for 11n nodes. 430500fc8705SAdrian Chadd */ 430600fc8705SAdrian Chadd if (ni->ni_flags & IEEE80211_NODE_HT) 430700fc8705SAdrian Chadd m->m_flags |= M_AMPDU; 430800fc8705SAdrian Chadd 430900fc8705SAdrian Chadd /* 4310b032f27cSSam Leffler * Sending station is known, dispatch directly. 4311b032f27cSSam Leffler */ 43125463c4a4SSam Leffler type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4313b032f27cSSam Leffler ieee80211_free_node(ni); 4314b032f27cSSam Leffler /* 4315b032f27cSSam Leffler * Arrange to update the last rx timestamp only for 4316b032f27cSSam Leffler * frames from our ap when operating in station mode. 4317b032f27cSSam Leffler * This assumes the rx key is always setup when 4318b032f27cSSam Leffler * associated. 
4319b032f27cSSam Leffler */ 4320b032f27cSSam Leffler if (ic->ic_opmode == IEEE80211_M_STA && 4321b032f27cSSam Leffler rs->rs_keyix != HAL_RXKEYIX_INVALID) 4322b032f27cSSam Leffler ngood++; 4323b032f27cSSam Leffler } else { 43245463c4a4SSam Leffler type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4325b032f27cSSam Leffler } 4326c42a7b7eSSam Leffler /* 4327c42a7b7eSSam Leffler * Track rx rssi and do any rx antenna management. 4328de5af704SSam Leffler */ 432965f9edeeSSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4330c42a7b7eSSam Leffler if (sc->sc_diversity) { 4331c42a7b7eSSam Leffler /* 4332c42a7b7eSSam Leffler * When using fast diversity, change the default rx 4333c42a7b7eSSam Leffler * antenna if diversity chooses the other antenna 3 4334c42a7b7eSSam Leffler * times in a row. 4335c42a7b7eSSam Leffler */ 433665f9edeeSSam Leffler if (sc->sc_defant != rs->rs_antenna) { 4337c42a7b7eSSam Leffler if (++sc->sc_rxotherant >= 3) 433865f9edeeSSam Leffler ath_setdefantenna(sc, rs->rs_antenna); 4339c42a7b7eSSam Leffler } else 4340c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 4341c42a7b7eSSam Leffler } 4342235ab70eSAdrian Chadd 4343235ab70eSAdrian Chadd /* Newer school diversity - kite specific for now */ 4344235ab70eSAdrian Chadd /* XXX perhaps migrate the normal diversity code to this? */ 4345235ab70eSAdrian Chadd if ((ah)->ah_rxAntCombDiversity) 4346235ab70eSAdrian Chadd (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4347235ab70eSAdrian Chadd 43483e50ec2cSSam Leffler if (sc->sc_softled) { 43493e50ec2cSSam Leffler /* 43503e50ec2cSSam Leffler * Blink for any data frame. Otherwise do a 43513e50ec2cSSam Leffler * heartbeat-style blink when idle. The latter 43523e50ec2cSSam Leffler * is mainly for station mode where we depend on 43533e50ec2cSSam Leffler * periodic beacon frames to trigger the poll event. 43543e50ec2cSSam Leffler */ 435531640eb7SSam Leffler if (type == IEEE80211_FC0_TYPE_DATA) { 435646d4d74cSSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 435746d4d74cSSam Leffler ath_led_event(sc, 435846d4d74cSSam Leffler rt->rateCodeToIndex[rs->rs_rate]); 43593e50ec2cSSam Leffler } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 436046d4d74cSSam Leffler ath_led_event(sc, 0); 43613e50ec2cSSam Leffler } 43625591b213SSam Leffler rx_next: 43636b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 43645591b213SSam Leffler } while (ath_rxbuf_init(sc, bf) == 0); 43655591b213SSam Leffler 4366c42a7b7eSSam Leffler /* rx signal state monitoring */ 436759efa8b5SSam Leffler ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4368d7736e13SSam Leffler if (ngood) 4369d7736e13SSam Leffler sc->sc_lastrx = tsf; 4370b5f4adb3SSam Leffler 4371f52d3452SAdrian Chadd CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 437248237774SAdrian Chadd /* Queue DFS tasklet if needed */ 437396ff485dSAdrian Chadd if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 437448237774SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 437548237774SAdrian Chadd 43761fdadc0fSAdrian Chadd /* 43771fdadc0fSAdrian Chadd * Now that all the RX frames were handled that 43781fdadc0fSAdrian Chadd * need to be handled, kick the PCU if there's 43791fdadc0fSAdrian Chadd * been an RXEOL condition. 
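 * (sc_kickpcu is set by the interrupt path when it sees RXEOL, i.e.
 * the RX descriptor list ran dry; re-priming the rx buffer pointer
 * and restarting PCU receive is deferred to this task context.)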
43801fdadc0fSAdrian Chadd */ 4381ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 438296ff485dSAdrian Chadd if (resched && sc->sc_kickpcu) { 4383f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 43848f939e79SAdrian Chadd device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 43858f939e79SAdrian Chadd __func__, npkts); 43868f939e79SAdrian Chadd 43878f939e79SAdrian Chadd /* XXX rxslink? */ 4388ef27340cSAdrian Chadd /* 4389ef27340cSAdrian Chadd * XXX can we hold the PCU lock here? 4390ef27340cSAdrian Chadd * Are there any net80211 buffer calls involved? 4391ef27340cSAdrian Chadd */ 43928f939e79SAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 43938f939e79SAdrian Chadd ath_hal_putrxbuf(ah, bf->bf_daddr); 43948f939e79SAdrian Chadd ath_hal_rxena(ah); /* enable recv descriptors */ 43958f939e79SAdrian Chadd ath_mode_init(sc); /* set filters, etc. */ 43968f939e79SAdrian Chadd ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 43978f939e79SAdrian Chadd 43981fdadc0fSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 43998f939e79SAdrian Chadd sc->sc_kickpcu = 0; 44001fdadc0fSAdrian Chadd } 4401ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 44021fdadc0fSAdrian Chadd 4403ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 440496ff485dSAdrian Chadd if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 4405339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 440604f19fd6SSam Leffler ieee80211_ff_age_all(ic, 100); 4407339ccfb3SSam Leffler #endif 4408339ccfb3SSam Leffler if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4409cd196bb2SSam Leffler ath_start(ifp); 4410339ccfb3SSam Leffler } 44118cec0ab9SSam Leffler #undef PA2DESC 4412ef27340cSAdrian Chadd 4413ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4414ef27340cSAdrian Chadd sc->sc_rxproc_cnt--; 4415ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 44165591b213SSam Leffler } 44175591b213SSam Leffler 4418622b3fd2SSam Leffler static void 4419622b3fd2SSam Leffler ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4420622b3fd2SSam Leffler { 4421622b3fd2SSam Leffler txq->axq_qnum = qnum; 4422339ccfb3SSam Leffler txq->axq_ac = 0; 4423622b3fd2SSam Leffler txq->axq_depth = 0; 442416d4de92SAdrian Chadd txq->axq_aggr_depth = 0; 4425622b3fd2SSam Leffler txq->axq_intrcnt = 0; 4426622b3fd2SSam Leffler txq->axq_link = NULL; 44276b349e5aSAdrian Chadd txq->axq_softc = sc; 44286b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_q); 44296b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_tidq); 4430622b3fd2SSam Leffler ATH_TXQ_LOCK_INIT(sc, txq); 4431622b3fd2SSam Leffler } 4432622b3fd2SSam Leffler 44335591b213SSam Leffler /* 4434c42a7b7eSSam Leffler * Setup a h/w transmit queue. 44355591b213SSam Leffler */ 4436c42a7b7eSSam Leffler static struct ath_txq * 4437c42a7b7eSSam Leffler ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4438c42a7b7eSSam Leffler { 4439c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4440c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4441c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4442c42a7b7eSSam Leffler int qnum; 4443c42a7b7eSSam Leffler 4444c42a7b7eSSam Leffler memset(&qi, 0, sizeof(qi)); 4445c42a7b7eSSam Leffler qi.tqi_subtype = subtype; 4446c42a7b7eSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4447c42a7b7eSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4448c42a7b7eSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4449c42a7b7eSSam Leffler /* 4450c42a7b7eSSam Leffler * Enable interrupts only for EOL and DESC conditions. 
4451c42a7b7eSSam Leffler * We mark tx descriptors to receive a DESC interrupt 4452c42a7b7eSSam Leffler * when a tx queue gets deep; otherwise waiting for the 4453c42a7b7eSSam Leffler * EOL to reap descriptors. Note that this is done to 4454c42a7b7eSSam Leffler * reduce interrupt load and this only defers reaping 4455c42a7b7eSSam Leffler * descriptors, never transmitting frames. Aside from 4456c42a7b7eSSam Leffler * reducing interrupts this also permits more concurrency. 4457c42a7b7eSSam Leffler * The only potential downside is if the tx queue backs 4458c42a7b7eSSam Leffler * up in which case the top half of the kernel may backup 4459c42a7b7eSSam Leffler * due to a lack of tx descriptors. 4460c42a7b7eSSam Leffler */ 4461bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 4462c42a7b7eSSam Leffler qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4463c42a7b7eSSam Leffler if (qnum == -1) { 4464c42a7b7eSSam Leffler /* 4465c42a7b7eSSam Leffler * NB: don't print a message, this happens 4466a614e076SSam Leffler * normally on parts with too few tx queues 4467c42a7b7eSSam Leffler */ 4468c42a7b7eSSam Leffler return NULL; 4469c42a7b7eSSam Leffler } 4470c42a7b7eSSam Leffler if (qnum >= N(sc->sc_txq)) { 44716891c875SPeter Wemm device_printf(sc->sc_dev, 44726891c875SPeter Wemm "hal qnum %u out of range, max %zu!\n", 4473c42a7b7eSSam Leffler qnum, N(sc->sc_txq)); 4474c42a7b7eSSam Leffler ath_hal_releasetxqueue(ah, qnum); 4475c42a7b7eSSam Leffler return NULL; 4476c42a7b7eSSam Leffler } 4477c42a7b7eSSam Leffler if (!ATH_TXQ_SETUP(sc, qnum)) { 4478622b3fd2SSam Leffler ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4479c42a7b7eSSam Leffler sc->sc_txqsetup |= 1<<qnum; 4480c42a7b7eSSam Leffler } 4481c42a7b7eSSam Leffler return &sc->sc_txq[qnum]; 4482c42a7b7eSSam Leffler #undef N 4483c42a7b7eSSam Leffler } 4484c42a7b7eSSam Leffler 4485c42a7b7eSSam Leffler /* 4486c42a7b7eSSam Leffler * Setup a hardware data transmit queue for the specified 4487c42a7b7eSSam Leffler * access control. The hal may not support all requested 4488c42a7b7eSSam Leffler * queues in which case it will return a reference to a 4489c42a7b7eSSam Leffler * previously setup queue. We record the mapping from ac's 4490c42a7b7eSSam Leffler * to h/w queues for use by ath_tx_start and also track 4491c42a7b7eSSam Leffler * the set of h/w queues being used to optimize work in the 4492c42a7b7eSSam Leffler * transmit interrupt handler and related routines. 4493c42a7b7eSSam Leffler */ 4494c42a7b7eSSam Leffler static int 4495c42a7b7eSSam Leffler ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4496c42a7b7eSSam Leffler { 4497c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 4498c42a7b7eSSam Leffler struct ath_txq *txq; 4499c42a7b7eSSam Leffler 4500c42a7b7eSSam Leffler if (ac >= N(sc->sc_ac2q)) { 45016891c875SPeter Wemm device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4502c42a7b7eSSam Leffler ac, N(sc->sc_ac2q)); 4503c42a7b7eSSam Leffler return 0; 4504c42a7b7eSSam Leffler } 4505c42a7b7eSSam Leffler txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4506c42a7b7eSSam Leffler if (txq != NULL) { 4507339ccfb3SSam Leffler txq->axq_ac = ac; 4508c42a7b7eSSam Leffler sc->sc_ac2q[ac] = txq; 4509c42a7b7eSSam Leffler return 1; 4510c42a7b7eSSam Leffler } else 4511c42a7b7eSSam Leffler return 0; 4512c42a7b7eSSam Leffler #undef N 4513c42a7b7eSSam Leffler } 4514c42a7b7eSSam Leffler 4515c42a7b7eSSam Leffler /* 4516c42a7b7eSSam Leffler * Update WME parameters for a transmit queue. 
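 * For example, a WME logcwmin of 4 becomes a cwmin of (1<<4)-1 = 15
 * via ATH_EXPONENT_TO_VALUE(), and a txopLimit given in 32us units
 * is scaled to microseconds by ATH_TXOP_TO_US() (a left shift by 5,
 * i.e. *32).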
4517c42a7b7eSSam Leffler */ 4518c42a7b7eSSam Leffler static int 4519c42a7b7eSSam Leffler ath_txq_update(struct ath_softc *sc, int ac) 4520c42a7b7eSSam Leffler { 4521c42a7b7eSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4522c42a7b7eSSam Leffler #define ATH_TXOP_TO_US(v) (v<<5) 4523b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 4524b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 4525c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[ac]; 4526c42a7b7eSSam Leffler struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4527c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 4528c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 4529c42a7b7eSSam Leffler 4530c42a7b7eSSam Leffler ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4531584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 453210ad9a77SSam Leffler if (sc->sc_tdma) { 453310ad9a77SSam Leffler /* 453410ad9a77SSam Leffler * AIFS is zero so there's no pre-transmit wait. The 453510ad9a77SSam Leffler * burst time defines the slot duration and is configured 453609be6601SSam Leffler * through net80211. The QCU is setup to not do post-xmit 453710ad9a77SSam Leffler * back off, lockout all lower-priority QCU's, and fire 453810ad9a77SSam Leffler * off the DMA beacon alert timer which is setup based 453910ad9a77SSam Leffler * on the slot configuration. 454010ad9a77SSam Leffler */ 454110ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 454210ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 454310ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 454410ad9a77SSam Leffler | HAL_TXQ_TXEOLINT_ENABLE 454510ad9a77SSam Leffler | HAL_TXQ_DBA_GATED 454610ad9a77SSam Leffler | HAL_TXQ_BACKOFF_DISABLE 454710ad9a77SSam Leffler | HAL_TXQ_ARB_LOCKOUT_GLOBAL 454810ad9a77SSam Leffler ; 454910ad9a77SSam Leffler qi.tqi_aifs = 0; 455010ad9a77SSam Leffler /* XXX +dbaprep? */ 455110ad9a77SSam Leffler qi.tqi_readyTime = sc->sc_tdmaslotlen; 455210ad9a77SSam Leffler qi.tqi_burstTime = qi.tqi_readyTime; 455310ad9a77SSam Leffler } else { 455410ad9a77SSam Leffler #endif 455516d4de92SAdrian Chadd /* 455616d4de92SAdrian Chadd * XXX shouldn't this just use the default flags 455716d4de92SAdrian Chadd * used in the previous queue setup? 
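 * For now the non-TDMA path below simply re-specifies the
 * OK/ERR/DESC/URN/EOL interrupt enables by hand rather than reusing
 * whatever ath_txq_setup() originally programmed.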
455816d4de92SAdrian Chadd */ 455910ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 456010ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 456110ad9a77SSam Leffler | HAL_TXQ_TXDESCINT_ENABLE 456210ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 45631f25c0f7SAdrian Chadd | HAL_TXQ_TXEOLINT_ENABLE 456410ad9a77SSam Leffler ; 4565c42a7b7eSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 4566c42a7b7eSSam Leffler qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4567c42a7b7eSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 456810ad9a77SSam Leffler qi.tqi_readyTime = 0; 4569c42a7b7eSSam Leffler qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4570584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 457110ad9a77SSam Leffler } 457210ad9a77SSam Leffler #endif 457310ad9a77SSam Leffler 457410ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 457510ad9a77SSam Leffler "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 457610ad9a77SSam Leffler __func__, txq->axq_qnum, qi.tqi_qflags, 457710ad9a77SSam Leffler qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4578c42a7b7eSSam Leffler 4579c42a7b7eSSam Leffler if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4580b032f27cSSam Leffler if_printf(ifp, "unable to update hardware queue " 4581c42a7b7eSSam Leffler "parameters for %s traffic!\n", 4582c42a7b7eSSam Leffler ieee80211_wme_acnames[ac]); 4583c42a7b7eSSam Leffler return 0; 4584c42a7b7eSSam Leffler } else { 4585c42a7b7eSSam Leffler ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4586c42a7b7eSSam Leffler return 1; 4587c42a7b7eSSam Leffler } 4588c42a7b7eSSam Leffler #undef ATH_TXOP_TO_US 4589c42a7b7eSSam Leffler #undef ATH_EXPONENT_TO_VALUE 4590c42a7b7eSSam Leffler } 4591c42a7b7eSSam Leffler 4592c42a7b7eSSam Leffler /* 4593c42a7b7eSSam Leffler * Callback from the 802.11 layer to update WME parameters. 4594c42a7b7eSSam Leffler */ 4595c42a7b7eSSam Leffler static int 4596c42a7b7eSSam Leffler ath_wme_update(struct ieee80211com *ic) 4597c42a7b7eSSam Leffler { 4598c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 4599c42a7b7eSSam Leffler 4600c42a7b7eSSam Leffler return !ath_txq_update(sc, WME_AC_BE) || 4601c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_BK) || 4602c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VI) || 4603c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4604c42a7b7eSSam Leffler } 4605c42a7b7eSSam Leffler 4606c42a7b7eSSam Leffler /* 4607c42a7b7eSSam Leffler * Reclaim resources for a setup queue. 4608c42a7b7eSSam Leffler */ 4609c42a7b7eSSam Leffler static void 4610c42a7b7eSSam Leffler ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4611c42a7b7eSSam Leffler { 4612c42a7b7eSSam Leffler 4613c42a7b7eSSam Leffler ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4614c42a7b7eSSam Leffler ATH_TXQ_LOCK_DESTROY(txq); 4615c42a7b7eSSam Leffler sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4616c42a7b7eSSam Leffler } 4617c42a7b7eSSam Leffler 4618c42a7b7eSSam Leffler /* 4619c42a7b7eSSam Leffler * Reclaim all tx queue resources. 
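 * Each queue claimed by ath_txq_setup() has its bit set in
 * sc_txqsetup; ath_tx_cleanupq() releases the h/w queue, destroys
 * its lock and clears that bit.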
4620c42a7b7eSSam Leffler */ 4621c42a7b7eSSam Leffler static void 4622c42a7b7eSSam Leffler ath_tx_cleanup(struct ath_softc *sc) 4623c42a7b7eSSam Leffler { 4624c42a7b7eSSam Leffler int i; 4625c42a7b7eSSam Leffler 4626c42a7b7eSSam Leffler ATH_TXBUF_LOCK_DESTROY(sc); 4627c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4628c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 4629c42a7b7eSSam Leffler ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4630c42a7b7eSSam Leffler } 46315591b213SSam Leffler 463299d258fdSSam Leffler /* 4633ab06fdf2SSam Leffler * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4634ab06fdf2SSam Leffler * using the current rates in sc_rixmap. 46358b5341deSSam Leffler */ 4636b8e788a5SAdrian Chadd int 4637ab06fdf2SSam Leffler ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 46388b5341deSSam Leffler { 4639ab06fdf2SSam Leffler int rix = sc->sc_rixmap[rate]; 4640ab06fdf2SSam Leffler /* NB: return lowest rix for invalid rate */ 4641ab06fdf2SSam Leffler return (rix == 0xff ? 0 : rix); 46428b5341deSSam Leffler } 46438b5341deSSam Leffler 46449352fb7aSAdrian Chadd static void 46459352fb7aSAdrian Chadd ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 46469352fb7aSAdrian Chadd struct ath_buf *bf) 46479352fb7aSAdrian Chadd { 46489352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 46499352fb7aSAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 46509352fb7aSAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 46519352fb7aSAdrian Chadd int sr, lr, pri; 46529352fb7aSAdrian Chadd 46539352fb7aSAdrian Chadd if (ts->ts_status == 0) { 46549352fb7aSAdrian Chadd u_int8_t txant = ts->ts_antenna; 46559352fb7aSAdrian Chadd sc->sc_stats.ast_ant_tx[txant]++; 46569352fb7aSAdrian Chadd sc->sc_ant_tx[txant]++; 46579352fb7aSAdrian Chadd if (ts->ts_finaltsi != 0) 46589352fb7aSAdrian Chadd sc->sc_stats.ast_tx_altrate++; 46599352fb7aSAdrian Chadd pri = M_WME_GETAC(bf->bf_m); 46609352fb7aSAdrian Chadd if (pri >= WME_AC_VO) 46619352fb7aSAdrian Chadd ic->ic_wme.wme_hipri_traffic++; 46629352fb7aSAdrian Chadd if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) 46639352fb7aSAdrian Chadd ni->ni_inact = ni->ni_inact_reload; 46649352fb7aSAdrian Chadd } else { 46659352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XRETRY) 46669352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xretries++; 46679352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FIFO) 46689352fb7aSAdrian Chadd sc->sc_stats.ast_tx_fifoerr++; 46699352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FILT) 46709352fb7aSAdrian Chadd sc->sc_stats.ast_tx_filtered++; 46719352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XTXOP) 46729352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xtxop++; 46739352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 46749352fb7aSAdrian Chadd sc->sc_stats.ast_tx_timerexpired++; 46759352fb7aSAdrian Chadd 46769352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 46779352fb7aSAdrian Chadd sc->sc_stats.ast_tx_data_underrun++; 46789352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 46799352fb7aSAdrian Chadd sc->sc_stats.ast_tx_delim_underrun++; 46809352fb7aSAdrian Chadd 46819352fb7aSAdrian Chadd if (bf->bf_m->m_flags & M_FF) 46829352fb7aSAdrian Chadd sc->sc_stats.ast_ff_txerr++; 46839352fb7aSAdrian Chadd } 46849352fb7aSAdrian Chadd /* XXX when is this valid? 
*/ 46859352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 46869352fb7aSAdrian Chadd sc->sc_stats.ast_tx_desccfgerr++; 46879352fb7aSAdrian Chadd 46889352fb7aSAdrian Chadd sr = ts->ts_shortretry; 46899352fb7aSAdrian Chadd lr = ts->ts_longretry; 46909352fb7aSAdrian Chadd sc->sc_stats.ast_tx_shortretry += sr; 46919352fb7aSAdrian Chadd sc->sc_stats.ast_tx_longretry += lr; 46929352fb7aSAdrian Chadd 46939352fb7aSAdrian Chadd } 46949352fb7aSAdrian Chadd 46959352fb7aSAdrian Chadd /* 46969352fb7aSAdrian Chadd * The default completion. If fail is 1, this means 46979352fb7aSAdrian Chadd * "please don't retry the frame, and just return -1 status 46989352fb7aSAdrian Chadd * to the net80211 stack. 46999352fb7aSAdrian Chadd */ 47009352fb7aSAdrian Chadd void 47019352fb7aSAdrian Chadd ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 47029352fb7aSAdrian Chadd { 47039352fb7aSAdrian Chadd struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 47049352fb7aSAdrian Chadd int st; 47059352fb7aSAdrian Chadd 47069352fb7aSAdrian Chadd if (fail == 1) 47079352fb7aSAdrian Chadd st = -1; 47089352fb7aSAdrian Chadd else 47099352fb7aSAdrian Chadd st = ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0) ? 47109352fb7aSAdrian Chadd ts->ts_status : HAL_TXERR_XRETRY; 47119352fb7aSAdrian Chadd 47129352fb7aSAdrian Chadd if (bf->bf_state.bfs_dobaw) 47139352fb7aSAdrian Chadd device_printf(sc->sc_dev, 47149352fb7aSAdrian Chadd "%s: dobaw should've been cleared!\n", __func__); 47159352fb7aSAdrian Chadd if (bf->bf_next != NULL) 47169352fb7aSAdrian Chadd device_printf(sc->sc_dev, 47179352fb7aSAdrian Chadd "%s: bf_next not NULL!\n", __func__); 47189352fb7aSAdrian Chadd 47199352fb7aSAdrian Chadd /* 47209352fb7aSAdrian Chadd * Do any tx complete callback. Note this must 47219352fb7aSAdrian Chadd * be done before releasing the node reference. 47229352fb7aSAdrian Chadd * This will free the mbuf, release the net80211 47239352fb7aSAdrian Chadd * node and recycle the ath_buf. 47249352fb7aSAdrian Chadd */ 47259352fb7aSAdrian Chadd ath_tx_freebuf(sc, bf, st); 47269352fb7aSAdrian Chadd } 47279352fb7aSAdrian Chadd 47289352fb7aSAdrian Chadd /* 4729eb6f0de0SAdrian Chadd * Update rate control with the given completion status. 4730eb6f0de0SAdrian Chadd */ 4731eb6f0de0SAdrian Chadd void 4732eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4733eb6f0de0SAdrian Chadd struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4734eb6f0de0SAdrian Chadd int nframes, int nbad) 4735eb6f0de0SAdrian Chadd { 4736eb6f0de0SAdrian Chadd struct ath_node *an; 4737eb6f0de0SAdrian Chadd 4738eb6f0de0SAdrian Chadd /* Only for unicast frames */ 4739eb6f0de0SAdrian Chadd if (ni == NULL) 4740eb6f0de0SAdrian Chadd return; 4741eb6f0de0SAdrian Chadd 4742eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 4743eb6f0de0SAdrian Chadd 4744eb6f0de0SAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4745eb6f0de0SAdrian Chadd ATH_NODE_LOCK(an); 4746eb6f0de0SAdrian Chadd ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4747eb6f0de0SAdrian Chadd ATH_NODE_UNLOCK(an); 4748eb6f0de0SAdrian Chadd } 4749eb6f0de0SAdrian Chadd } 4750eb6f0de0SAdrian Chadd 4751eb6f0de0SAdrian Chadd /* 47529352fb7aSAdrian Chadd * Update the busy status of the last frame on the free list. 
47539352fb7aSAdrian Chadd * When doing TDMA, the busy flag tracks whether the hardware 47549352fb7aSAdrian Chadd * currently points to this buffer or not, and thus gated DMA 47559352fb7aSAdrian Chadd * may restart by re-reading the last descriptor in this 47569352fb7aSAdrian Chadd * buffer. 47579352fb7aSAdrian Chadd * 47589352fb7aSAdrian Chadd * This should be called in the completion function once one 47599352fb7aSAdrian Chadd * of the buffers has been used. 47609352fb7aSAdrian Chadd */ 47619352fb7aSAdrian Chadd static void 47629352fb7aSAdrian Chadd ath_tx_update_busy(struct ath_softc *sc) 47639352fb7aSAdrian Chadd { 47649352fb7aSAdrian Chadd struct ath_buf *last; 47659352fb7aSAdrian Chadd 47669352fb7aSAdrian Chadd /* 47679352fb7aSAdrian Chadd * Since the last frame may still be marked 47689352fb7aSAdrian Chadd * as ATH_BUF_BUSY, unmark it here before 47699352fb7aSAdrian Chadd * finishing the frame processing. 47709352fb7aSAdrian Chadd * Since we've completed a frame (aggregate 47719352fb7aSAdrian Chadd * or otherwise), the hardware has moved on 47729352fb7aSAdrian Chadd * and is no longer referencing the previous 47739352fb7aSAdrian Chadd * descriptor. 47749352fb7aSAdrian Chadd */ 47759352fb7aSAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 47769352fb7aSAdrian Chadd last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 47779352fb7aSAdrian Chadd if (last != NULL) 47789352fb7aSAdrian Chadd last->bf_flags &= ~ATH_BUF_BUSY; 47799352fb7aSAdrian Chadd } 47809352fb7aSAdrian Chadd 47819352fb7aSAdrian Chadd 478268e8e04eSSam Leffler /* 4783c42a7b7eSSam Leffler * Process completed xmit descriptors from the specified queue. 4784eb6f0de0SAdrian Chadd * Kick the packet scheduler if needed. This can occur from this 4785eb6f0de0SAdrian Chadd * particular task. 4786c42a7b7eSSam Leffler */ 4787d7736e13SSam Leffler static int 478896ff485dSAdrian Chadd ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 47895591b213SSam Leffler { 47905591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 47919352fb7aSAdrian Chadd struct ath_buf *bf; 47926edf1dc7SAdrian Chadd struct ath_desc *ds; 479365f9edeeSSam Leffler struct ath_tx_status *ts; 47945591b213SSam Leffler struct ieee80211_node *ni; 4795eb6f0de0SAdrian Chadd struct ath_node *an; 47969352fb7aSAdrian Chadd int nacked; 47975591b213SSam Leffler HAL_STATUS status; 47985591b213SSam Leffler 4799c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4800c42a7b7eSSam Leffler __func__, txq->axq_qnum, 4801c42a7b7eSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4802c42a7b7eSSam Leffler txq->axq_link); 4803d7736e13SSam Leffler nacked = 0; 48045591b213SSam Leffler for (;;) { 4805c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 4806c42a7b7eSSam Leffler txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 48076b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 48085591b213SSam Leffler if (bf == NULL) { 4809c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 48105591b213SSam Leffler break; 48115591b213SSam Leffler } 48126edf1dc7SAdrian Chadd ds = bf->bf_lastds; /* XXX must be setup correctly! 
*/ 481365f9edeeSSam Leffler ts = &bf->bf_status.ds_txstat; 481465f9edeeSSam Leffler status = ath_hal_txprocdesc(ah, ds, ts); 4815a585a9a1SSam Leffler #ifdef ATH_DEBUG 4816c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 48176902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 48186902009eSSam Leffler status == HAL_OK); 48195591b213SSam Leffler #endif 48205591b213SSam Leffler if (status == HAL_EINPROGRESS) { 4821c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 48225591b213SSam Leffler break; 48235591b213SSam Leffler } 48246b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 4825584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 482610ad9a77SSam Leffler if (txq->axq_depth > 0) { 482710ad9a77SSam Leffler /* 482810ad9a77SSam Leffler * More frames follow. Mark the buffer busy 482910ad9a77SSam Leffler * so it's not re-used while the hardware may 483010ad9a77SSam Leffler * still re-read the link field in the descriptor. 48316edf1dc7SAdrian Chadd * 48326edf1dc7SAdrian Chadd * Use the last buffer in an aggregate as that 48336edf1dc7SAdrian Chadd * is where the hardware may be - intermediate 48346edf1dc7SAdrian Chadd * descriptors won't be "busy". 483510ad9a77SSam Leffler */ 48366edf1dc7SAdrian Chadd bf->bf_last->bf_flags |= ATH_BUF_BUSY; 483710ad9a77SSam Leffler } else 483810ad9a77SSam Leffler #else 4839ebecf802SSam Leffler if (txq->axq_depth == 0) 484010ad9a77SSam Leffler #endif 48411539af1eSSam Leffler txq->axq_link = NULL; 48426edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 48436edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 48445591b213SSam Leffler 48455591b213SSam Leffler ni = bf->bf_node; 4846c42a7b7eSSam Leffler /* 48479352fb7aSAdrian Chadd * If unicast frame was ack'd update RSSI, 484884784be1SSam Leffler * including the last rx time used to 484984784be1SSam Leffler * workaround phantom bmiss interrupts. 4850d7736e13SSam Leffler */ 48519352fb7aSAdrian Chadd if (ni != NULL && ts->ts_status == 0 && 48529352fb7aSAdrian Chadd ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)) { 4853d7736e13SSam Leffler nacked++; 485484784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 485584784be1SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 485684784be1SSam Leffler ts->ts_rssi); 485784784be1SSam Leffler } 48589352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 48599352fb7aSAdrian Chadd 48609352fb7aSAdrian Chadd /* If unicast frame, update general statistics */ 48619352fb7aSAdrian Chadd if (ni != NULL) { 4862eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 48639352fb7aSAdrian Chadd /* update statistics */ 48649352fb7aSAdrian Chadd ath_tx_update_stats(sc, ts, bf); 4865d7736e13SSam Leffler } 48669352fb7aSAdrian Chadd 48670a915fadSSam Leffler /* 48689352fb7aSAdrian Chadd * Call the completion handler. 48699352fb7aSAdrian Chadd * The completion handler is responsible for 48709352fb7aSAdrian Chadd * calling the rate control code. 48719352fb7aSAdrian Chadd * 48729352fb7aSAdrian Chadd * Frames with no completion handler get the 48739352fb7aSAdrian Chadd * rate control code called here. 487468e8e04eSSam Leffler */ 48759352fb7aSAdrian Chadd if (bf->bf_comp == NULL) { 48769352fb7aSAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 48779352fb7aSAdrian Chadd (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) { 48789352fb7aSAdrian Chadd /* 48799352fb7aSAdrian Chadd * XXX assume this isn't an aggregate 48809352fb7aSAdrian Chadd * frame. 
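 * Aggregate traffic is expected to always carry its own bf_comp
 * handler and so should never reach this path; that is why nframes
 * is passed as 1 below.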
48819352fb7aSAdrian Chadd */ 4882eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(sc, ni, 4883eb6f0de0SAdrian Chadd bf->bf_state.bfs_rc, ts, 4884eb6f0de0SAdrian Chadd bf->bf_state.bfs_pktlen, 1, 4885eb6f0de0SAdrian Chadd (ts->ts_status == 0 ? 0 : 1)); 48865591b213SSam Leffler } 48879352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 0); 48889352fb7aSAdrian Chadd } else 48899352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 0); 48905591b213SSam Leffler } 4891339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 489268e8e04eSSam Leffler /* 489368e8e04eSSam Leffler * Flush fast-frame staging queue when traffic slows. 489468e8e04eSSam Leffler */ 489568e8e04eSSam Leffler if (txq->axq_depth <= 1) 489604f19fd6SSam Leffler ieee80211_ff_flush(ic, txq->axq_ac); 4897339ccfb3SSam Leffler #endif 4898eb6f0de0SAdrian Chadd 4899eb6f0de0SAdrian Chadd /* Kick the TXQ scheduler */ 4900eb6f0de0SAdrian Chadd if (dosched) { 4901eb6f0de0SAdrian Chadd ATH_TXQ_LOCK(txq); 4902eb6f0de0SAdrian Chadd ath_txq_sched(sc, txq); 4903eb6f0de0SAdrian Chadd ATH_TXQ_UNLOCK(txq); 4904eb6f0de0SAdrian Chadd } 4905eb6f0de0SAdrian Chadd 4906d7736e13SSam Leffler return nacked; 4907d7736e13SSam Leffler } 4908d7736e13SSam Leffler 49098f939e79SAdrian Chadd #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4910c42a7b7eSSam Leffler 4911c42a7b7eSSam Leffler /* 4912c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 4913c42a7b7eSSam Leffler * for a single hardware transmit queue (e.g. 5210 and 5211). 4914c42a7b7eSSam Leffler */ 4915c42a7b7eSSam Leffler static void 4916c42a7b7eSSam Leffler ath_tx_proc_q0(void *arg, int npending) 4917c42a7b7eSSam Leffler { 4918c42a7b7eSSam Leffler struct ath_softc *sc = arg; 4919fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 49208f939e79SAdrian Chadd uint32_t txqs; 4921c42a7b7eSSam Leffler 4922ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4923ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 49248f939e79SAdrian Chadd txqs = sc->sc_txq_active; 49258f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 4926ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 49278f939e79SAdrian Chadd 492896ff485dSAdrian Chadd if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 49298f939e79SAdrian Chadd /* XXX why is lastrx updated in tx code? */ 4930d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 49318f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 493296ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 4933ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 493413f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 49352e986da5SSam Leffler sc->sc_wd_timer = 0; 49365591b213SSam Leffler 49373e50ec2cSSam Leffler if (sc->sc_softled) 493846d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 49393e50ec2cSSam Leffler 4940ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4941ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 4942ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4943ef27340cSAdrian Chadd 49445591b213SSam Leffler ath_start(ifp); 49455591b213SSam Leffler } 49465591b213SSam Leffler 49475591b213SSam Leffler /* 4948c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 4949c42a7b7eSSam Leffler * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
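 * The interrupt path sets a bit in sc_txq_active for each queue with
 * completed work; we snapshot and clear that mask under the PCU lock
 * and only service the queues named in the snapshot.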
49505591b213SSam Leffler */ 49515591b213SSam Leffler static void 4952c42a7b7eSSam Leffler ath_tx_proc_q0123(void *arg, int npending) 4953c42a7b7eSSam Leffler { 4954c42a7b7eSSam Leffler struct ath_softc *sc = arg; 4955fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 4956d7736e13SSam Leffler int nacked; 49578f939e79SAdrian Chadd uint32_t txqs; 49588f939e79SAdrian Chadd 4959ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4960ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 49618f939e79SAdrian Chadd txqs = sc->sc_txq_active; 49628f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 4963ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4964c42a7b7eSSam Leffler 4965c42a7b7eSSam Leffler /* 4966c42a7b7eSSam Leffler * Process each active queue. 4967c42a7b7eSSam Leffler */ 4968d7736e13SSam Leffler nacked = 0; 49698f939e79SAdrian Chadd if (TXQACTIVE(txqs, 0)) 497096ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 49718f939e79SAdrian Chadd if (TXQACTIVE(txqs, 1)) 497296ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 49738f939e79SAdrian Chadd if (TXQACTIVE(txqs, 2)) 497496ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 49758f939e79SAdrian Chadd if (TXQACTIVE(txqs, 3)) 497696ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 49778f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 497896ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 4979d7736e13SSam Leffler if (nacked) 4980d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4981c42a7b7eSSam Leffler 4982ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 498313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 49842e986da5SSam Leffler sc->sc_wd_timer = 0; 4985c42a7b7eSSam Leffler 49863e50ec2cSSam Leffler if (sc->sc_softled) 498746d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 49883e50ec2cSSam Leffler 4989ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4990ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 4991ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4992ef27340cSAdrian Chadd 4993c42a7b7eSSam Leffler ath_start(ifp); 4994c42a7b7eSSam Leffler } 4995c42a7b7eSSam Leffler 4996c42a7b7eSSam Leffler /* 4997c42a7b7eSSam Leffler * Deferred processing of transmit interrupt. 4998c42a7b7eSSam Leffler */ 4999c42a7b7eSSam Leffler static void 5000c42a7b7eSSam Leffler ath_tx_proc(void *arg, int npending) 5001c42a7b7eSSam Leffler { 5002c42a7b7eSSam Leffler struct ath_softc *sc = arg; 5003fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 5004d7736e13SSam Leffler int i, nacked; 50058f939e79SAdrian Chadd uint32_t txqs; 50068f939e79SAdrian Chadd 5007ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5008ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 50098f939e79SAdrian Chadd txqs = sc->sc_txq_active; 50108f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 5011ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5012c42a7b7eSSam Leffler 5013c42a7b7eSSam Leffler /* 5014c42a7b7eSSam Leffler * Process each active queue. 5015c42a7b7eSSam Leffler */ 5016d7736e13SSam Leffler nacked = 0; 5017c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 50188f939e79SAdrian Chadd if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 501996ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 5020d7736e13SSam Leffler if (nacked) 5021d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5022c42a7b7eSSam Leffler 5023ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? 
*/ 502413f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 50252e986da5SSam Leffler sc->sc_wd_timer = 0; 5026c42a7b7eSSam Leffler 50273e50ec2cSSam Leffler if (sc->sc_softled) 502846d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 50293e50ec2cSSam Leffler 5030ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5031ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 5032ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5033ef27340cSAdrian Chadd 5034c42a7b7eSSam Leffler ath_start(ifp); 5035c42a7b7eSSam Leffler } 503616d4de92SAdrian Chadd #undef TXQACTIVE 5037c42a7b7eSSam Leffler 50389352fb7aSAdrian Chadd /* 50399352fb7aSAdrian Chadd * Return a buffer to the pool and update the 'busy' flag on the 50409352fb7aSAdrian Chadd * previous 'tail' entry. 50419352fb7aSAdrian Chadd * 50429352fb7aSAdrian Chadd * This _must_ only be called when the buffer is involved in a completed 50439352fb7aSAdrian Chadd * TX. The logic is that if it was part of an active TX, the previous 50449352fb7aSAdrian Chadd * buffer on the list is now not involved in a halted TX DMA queue, waiting 50459352fb7aSAdrian Chadd * for restart (eg for TDMA.) 50469352fb7aSAdrian Chadd * 50479352fb7aSAdrian Chadd * The caller must free the mbuf and recycle the node reference. 50489352fb7aSAdrian Chadd */ 50499352fb7aSAdrian Chadd void 50509352fb7aSAdrian Chadd ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 50519352fb7aSAdrian Chadd { 50529352fb7aSAdrian Chadd bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 50539352fb7aSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 50549352fb7aSAdrian Chadd 50559352fb7aSAdrian Chadd KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 50569352fb7aSAdrian Chadd KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 50579352fb7aSAdrian Chadd 50589352fb7aSAdrian Chadd ATH_TXBUF_LOCK(sc); 50599352fb7aSAdrian Chadd ath_tx_update_busy(sc); 50609352fb7aSAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 50619352fb7aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 50629352fb7aSAdrian Chadd } 50639352fb7aSAdrian Chadd 50649352fb7aSAdrian Chadd /* 50659352fb7aSAdrian Chadd * This is currently used by ath_tx_draintxq() and 50669352fb7aSAdrian Chadd * ath_tx_tid_free_pkts(). 50679352fb7aSAdrian Chadd * 50689352fb7aSAdrian Chadd * It recycles a single ath_buf. 50699352fb7aSAdrian Chadd */ 50709352fb7aSAdrian Chadd void 50719352fb7aSAdrian Chadd ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 50729352fb7aSAdrian Chadd { 50739352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 50749352fb7aSAdrian Chadd struct mbuf *m0 = bf->bf_m; 50759352fb7aSAdrian Chadd 50769352fb7aSAdrian Chadd bf->bf_node = NULL; 50779352fb7aSAdrian Chadd bf->bf_m = NULL; 50789352fb7aSAdrian Chadd 50799352fb7aSAdrian Chadd /* Free the buffer, it's not needed any longer */ 50809352fb7aSAdrian Chadd ath_freebuf(sc, bf); 50819352fb7aSAdrian Chadd 50829352fb7aSAdrian Chadd if (ni != NULL) { 50839352fb7aSAdrian Chadd /* 50849352fb7aSAdrian Chadd * Do any callback and reclaim the node reference. 50859352fb7aSAdrian Chadd */ 50869352fb7aSAdrian Chadd if (m0->m_flags & M_TXCB) 50879352fb7aSAdrian Chadd ieee80211_process_callback(ni, m0, status); 50889352fb7aSAdrian Chadd ieee80211_free_node(ni); 50899352fb7aSAdrian Chadd } 50909352fb7aSAdrian Chadd m_freem(m0); 50919352fb7aSAdrian Chadd 50929352fb7aSAdrian Chadd /* 50939352fb7aSAdrian Chadd * XXX the buffer used to be freed -after-, but the DMA map was 50949352fb7aSAdrian Chadd * freed where ath_freebuf() now is. 
I've no idea what this 50959352fb7aSAdrian Chadd * will do. 50969352fb7aSAdrian Chadd */ 50979352fb7aSAdrian Chadd } 50989352fb7aSAdrian Chadd 50999352fb7aSAdrian Chadd void 5100c42a7b7eSSam Leffler ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 51015591b213SSam Leffler { 5102a585a9a1SSam Leffler #ifdef ATH_DEBUG 51035591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5104d2f6ed15SSam Leffler #endif 51055591b213SSam Leffler struct ath_buf *bf; 51067a4c5ed9SSam Leffler u_int ix; 51075591b213SSam Leffler 5108c42a7b7eSSam Leffler /* 5109c42a7b7eSSam Leffler * NB: this assumes output has been stopped and 51105d61b5e8SSam Leffler * we do not need to block ath_tx_proc 5111c42a7b7eSSam Leffler */ 511210ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 51136b349e5aSAdrian Chadd bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 511410ad9a77SSam Leffler if (bf != NULL) 511510ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 511610ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 51179352fb7aSAdrian Chadd 51187a4c5ed9SSam Leffler for (ix = 0;; ix++) { 5119c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 51206b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 51215591b213SSam Leffler if (bf == NULL) { 5122ebecf802SSam Leffler txq->axq_link = NULL; 5123c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 51245591b213SSam Leffler break; 51255591b213SSam Leffler } 51266b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 51276edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 51286edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 5129a585a9a1SSam Leffler #ifdef ATH_DEBUG 51304a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 5131b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5132b032f27cSSam Leffler 51336902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 51346edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 513565f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5136e40b6ab1SSam Leffler ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 51374a3ac3fcSSam Leffler bf->bf_m->m_len, 0, -1); 51384a3ac3fcSSam Leffler } 5139a585a9a1SSam Leffler #endif /* ATH_DEBUG */ 514023428eafSSam Leffler /* 51419352fb7aSAdrian Chadd * Since we're now doing magic in the completion 51429352fb7aSAdrian Chadd * functions, we -must- call it for aggregation 51439352fb7aSAdrian Chadd * destinations or BAW tracking will get upset. 514423428eafSSam Leffler */ 51459352fb7aSAdrian Chadd /* 51469352fb7aSAdrian Chadd * Clear ATH_BUF_BUSY; the completion handler 51479352fb7aSAdrian Chadd * will free the buffer. 51489352fb7aSAdrian Chadd */ 51499352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 515010ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 51519352fb7aSAdrian Chadd if (bf->bf_comp) 51529352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 1); 51539352fb7aSAdrian Chadd else 51549352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 1); 51555591b213SSam Leffler } 51569352fb7aSAdrian Chadd 5157eb6f0de0SAdrian Chadd /* 5158eb6f0de0SAdrian Chadd * Drain software queued frames which are on 5159eb6f0de0SAdrian Chadd * active TIDs. 
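 * These frames never reached the hardware queue flushed above; they
 * sit on the per-TID software queues and are reclaimed separately.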
5160eb6f0de0SAdrian Chadd */ 5161eb6f0de0SAdrian Chadd ath_tx_txq_drain(sc, txq); 5162c42a7b7eSSam Leffler } 5163c42a7b7eSSam Leffler 5164c42a7b7eSSam Leffler static void 5165c42a7b7eSSam Leffler ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5166c42a7b7eSSam Leffler { 5167c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5168c42a7b7eSSam Leffler 5169c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5170c42a7b7eSSam Leffler __func__, txq->axq_qnum, 51716891c875SPeter Wemm (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 51726891c875SPeter Wemm txq->axq_link); 51734a3ac3fcSSam Leffler (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5174c42a7b7eSSam Leffler } 5175c42a7b7eSSam Leffler 51762d433424SAdrian Chadd static int 51772d433424SAdrian Chadd ath_stoptxdma(struct ath_softc *sc) 5178c42a7b7eSSam Leffler { 5179c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 5180c42a7b7eSSam Leffler int i; 5181c42a7b7eSSam Leffler 5182c42a7b7eSSam Leffler /* XXX return value */ 51832d433424SAdrian Chadd if (sc->sc_invalid) 51842d433424SAdrian Chadd return 0; 51852d433424SAdrian Chadd 5186c42a7b7eSSam Leffler if (!sc->sc_invalid) { 5187c42a7b7eSSam Leffler /* don't touch the hardware if marked invalid */ 51884a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 51894a3ac3fcSSam Leffler __func__, sc->sc_bhalq, 51904a3ac3fcSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 51914a3ac3fcSSam Leffler NULL); 5192c42a7b7eSSam Leffler (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5193c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5194c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 5195c42a7b7eSSam Leffler ath_tx_stopdma(sc, &sc->sc_txq[i]); 5196c42a7b7eSSam Leffler } 51972d433424SAdrian Chadd 51982d433424SAdrian Chadd return 1; 51992d433424SAdrian Chadd } 52002d433424SAdrian Chadd 52012d433424SAdrian Chadd /* 52022d433424SAdrian Chadd * Drain the transmit queues and reclaim resources. 52032d433424SAdrian Chadd */ 52042d433424SAdrian Chadd static void 52052d433424SAdrian Chadd ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 52062d433424SAdrian Chadd { 52072d433424SAdrian Chadd #ifdef ATH_DEBUG 52082d433424SAdrian Chadd struct ath_hal *ah = sc->sc_ah; 52092d433424SAdrian Chadd #endif 52102d433424SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 52112d433424SAdrian Chadd int i; 52122d433424SAdrian Chadd 52132d433424SAdrian Chadd (void) ath_stoptxdma(sc); 52142d433424SAdrian Chadd 5215ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5216ef27340cSAdrian Chadd /* 5217ef27340cSAdrian Chadd * XXX TODO: should we just handle the completed TX frames 5218ef27340cSAdrian Chadd * here, whether or not the reset is a full one or not? 
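 * For now ATH_RESET_NOLOSS only reaps frames the hardware has
 * already completed, while any other reset type drains the queue and
 * completes the remainder with an error status.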
5219ef27340cSAdrian Chadd */ 5220ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 5221ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) 5222ef27340cSAdrian Chadd ath_tx_processq(sc, &sc->sc_txq[i], 0); 5223ef27340cSAdrian Chadd else 5224c42a7b7eSSam Leffler ath_tx_draintxq(sc, &sc->sc_txq[i]); 5225ef27340cSAdrian Chadd } 5226ef27340cSAdrian Chadd } 52274a3ac3fcSSam Leffler #ifdef ATH_DEBUG 52284a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 52296b349e5aSAdrian Chadd struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 52304a3ac3fcSSam Leffler if (bf != NULL && bf->bf_m != NULL) { 52316902009eSSam Leffler ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 52326edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 523365f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 5234e40b6ab1SSam Leffler ieee80211_dump_pkt(ifp->if_l2com, 5235e40b6ab1SSam Leffler mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5236e40b6ab1SSam Leffler 0, -1); 52374a3ac3fcSSam Leffler } 52384a3ac3fcSSam Leffler } 52394a3ac3fcSSam Leffler #endif /* ATH_DEBUG */ 5240ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 524113f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 52422e986da5SSam Leffler sc->sc_wd_timer = 0; 52435591b213SSam Leffler } 52445591b213SSam Leffler 52455591b213SSam Leffler /* 52465591b213SSam Leffler * Disable the receive h/w in preparation for a reset. 52475591b213SSam Leffler */ 52485591b213SSam Leffler static void 52499a842e8bSAdrian Chadd ath_stoprecv(struct ath_softc *sc, int dodelay) 52505591b213SSam Leffler { 52518cec0ab9SSam Leffler #define PA2DESC(_sc, _pa) \ 5252c42a7b7eSSam Leffler ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 5253c42a7b7eSSam Leffler ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 52545591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 52555591b213SSam Leffler 52565591b213SSam Leffler ath_hal_stoppcurecv(ah); /* disable PCU */ 52575591b213SSam Leffler ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 52585591b213SSam Leffler ath_hal_stopdmarecv(ah); /* disable DMA engine */ 52599a842e8bSAdrian Chadd if (dodelay) 5260c42a7b7eSSam Leffler DELAY(3000); /* 3ms is long enough for 1 frame */ 5261a585a9a1SSam Leffler #ifdef ATH_DEBUG 5262c42a7b7eSSam Leffler if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 52635591b213SSam Leffler struct ath_buf *bf; 52647a4c5ed9SSam Leffler u_int ix; 52655591b213SSam Leffler 5266e325e530SSam Leffler printf("%s: rx queue %p, link %p\n", __func__, 526730310634SPeter Wemm (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); 52687a4c5ed9SSam Leffler ix = 0; 52696b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 52708cec0ab9SSam Leffler struct ath_desc *ds = bf->bf_desc; 527165f9edeeSSam Leffler struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5272c42a7b7eSSam Leffler HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 527365f9edeeSSam Leffler bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 5274c42a7b7eSSam Leffler if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 52756902009eSSam Leffler ath_printrxbuf(sc, bf, ix, status == HAL_OK); 52767a4c5ed9SSam Leffler ix++; 52775591b213SSam Leffler } 52785591b213SSam Leffler } 52795591b213SSam Leffler #endif 528068e8e04eSSam Leffler if (sc->sc_rxpending != NULL) { 528168e8e04eSSam Leffler m_freem(sc->sc_rxpending); 528268e8e04eSSam Leffler sc->sc_rxpending = NULL; 528368e8e04eSSam Leffler } 52845591b213SSam Leffler sc->sc_rxlink = NULL; /* just in case */ 52858cec0ab9SSam Leffler #undef PA2DESC 52865591b213SSam Leffler } 
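
/*
 * Note: the restart sequence in ath_startrecv() below mirrors the
 * kickpcu recovery path in ath_rx_proc(): re-init each rx buffer,
 * point the hardware at the head of the rx list, re-enable the
 * descriptor DMA engine, reload the filter/multicast state and then
 * restart the PCU receive engine.
 */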
52875591b213SSam Leffler 52885591b213SSam Leffler /* 52895591b213SSam Leffler * Enable the receive h/w following a reset. 52905591b213SSam Leffler */ 52915591b213SSam Leffler static int 52925591b213SSam Leffler ath_startrecv(struct ath_softc *sc) 52935591b213SSam Leffler { 52945591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 52955591b213SSam Leffler struct ath_buf *bf; 52965591b213SSam Leffler 52975591b213SSam Leffler sc->sc_rxlink = NULL; 529868e8e04eSSam Leffler sc->sc_rxpending = NULL; 52996b349e5aSAdrian Chadd TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 53005591b213SSam Leffler int error = ath_rxbuf_init(sc, bf); 53015591b213SSam Leffler if (error != 0) { 5302c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RECV, 5303c42a7b7eSSam Leffler "%s: ath_rxbuf_init failed %d\n", 5304c42a7b7eSSam Leffler __func__, error); 53055591b213SSam Leffler return error; 53065591b213SSam Leffler } 53075591b213SSam Leffler } 53085591b213SSam Leffler 53096b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_rxbuf); 53105591b213SSam Leffler ath_hal_putrxbuf(ah, bf->bf_daddr); 53115591b213SSam Leffler ath_hal_rxena(ah); /* enable recv descriptors */ 53125591b213SSam Leffler ath_mode_init(sc); /* set filters, etc. */ 53135591b213SSam Leffler ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 53145591b213SSam Leffler return 0; 53155591b213SSam Leffler } 53165591b213SSam Leffler 53175591b213SSam Leffler /* 5318c42a7b7eSSam Leffler * Update internal state after a channel change. 5319c42a7b7eSSam Leffler */ 5320c42a7b7eSSam Leffler static void 5321c42a7b7eSSam Leffler ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5322c42a7b7eSSam Leffler { 5323c42a7b7eSSam Leffler enum ieee80211_phymode mode; 5324c42a7b7eSSam Leffler 5325c42a7b7eSSam Leffler /* 5326c42a7b7eSSam Leffler * Change channels and update the h/w rate map 5327c42a7b7eSSam Leffler * if we're switching; e.g. 11a to 11b/g. 5328c42a7b7eSSam Leffler */ 532968e8e04eSSam Leffler mode = ieee80211_chan2mode(chan); 5330c42a7b7eSSam Leffler if (mode != sc->sc_curmode) 5331c42a7b7eSSam Leffler ath_setcurmode(sc, mode); 533259efa8b5SSam Leffler sc->sc_curchan = chan; 5333c42a7b7eSSam Leffler } 5334c42a7b7eSSam Leffler 5335c42a7b7eSSam Leffler /* 53365591b213SSam Leffler * Set/change channels. If the channel is really being changed, 53374fa8d4efSDaniel Eischen * it's done by resetting the chip. To accomplish this we must 53385591b213SSam Leffler * first cleanup any pending DMA, then restart stuff after a la 53395591b213SSam Leffler * ath_init. 53405591b213SSam Leffler */ 53415591b213SSam Leffler static int 53425591b213SSam Leffler ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 53435591b213SSam Leffler { 5344b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 5345b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 53465591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 5347ef27340cSAdrian Chadd int ret = 0; 5348ef27340cSAdrian Chadd int dointr = 0; 5349ef27340cSAdrian Chadd 5350ef27340cSAdrian Chadd /* Treat this as an interface reset */ 5351ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5352ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 5353ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5354ef27340cSAdrian Chadd __func__); 5355ee321975SAdrian Chadd } 5356ef27340cSAdrian Chadd if (chan != sc->sc_curchan) { 5357ef27340cSAdrian Chadd dointr = 1; 5358ef27340cSAdrian Chadd /* XXX only do this if inreset_cnt is 1? 
*/ 5359ef27340cSAdrian Chadd ath_hal_intrset(ah, 0); 5360ef27340cSAdrian Chadd } 5361ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5362ef27340cSAdrian Chadd ath_txrx_stop(sc); 5363c42a7b7eSSam Leffler 536459efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 536559efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 536659efa8b5SSam Leffler chan->ic_freq, chan->ic_flags); 536759efa8b5SSam Leffler if (chan != sc->sc_curchan) { 5368c42a7b7eSSam Leffler HAL_STATUS status; 53695591b213SSam Leffler /* 53705591b213SSam Leffler * To switch channels clear any pending DMA operations; 53715591b213SSam Leffler * wait long enough for the RX fifo to drain, reset the 53725591b213SSam Leffler * hardware at the new frequency, and then re-enable 53735591b213SSam Leffler * the relevant bits of the h/w. 53745591b213SSam Leffler */ 5375ef27340cSAdrian Chadd #if 0 53765591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 5377ef27340cSAdrian Chadd #endif 53789a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* turn off frame recv */ 53799a842e8bSAdrian Chadd /* 53809a842e8bSAdrian Chadd * First, handle completed TX/RX frames. 53819a842e8bSAdrian Chadd */ 53829a842e8bSAdrian Chadd ath_rx_proc(sc, 0); 53839a842e8bSAdrian Chadd ath_draintxq(sc, ATH_RESET_NOLOSS); 53849a842e8bSAdrian Chadd /* 53859a842e8bSAdrian Chadd * Next, flush the non-scheduled frames. 53869a842e8bSAdrian Chadd */ 5387517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 53889a842e8bSAdrian Chadd 538959efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 5390b032f27cSSam Leffler if_printf(ifp, "%s: unable to reset " 539179649302SGavin Atkinson "channel %u (%u MHz, flags 0x%x), hal status %u\n", 539259efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 539359efa8b5SSam Leffler chan->ic_freq, chan->ic_flags, status); 5394ef27340cSAdrian Chadd ret = EIO; 5395ef27340cSAdrian Chadd goto finish; 53965591b213SSam Leffler } 5397c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 5398c42a7b7eSSam Leffler 539948237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 540048237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 540148237774SAdrian Chadd 54025591b213SSam Leffler /* 54035591b213SSam Leffler * Re-enable rx framework. 54045591b213SSam Leffler */ 54055591b213SSam Leffler if (ath_startrecv(sc) != 0) { 5406b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 5407b032f27cSSam Leffler __func__); 5408ef27340cSAdrian Chadd ret = EIO; 5409ef27340cSAdrian Chadd goto finish; 54105591b213SSam Leffler } 54115591b213SSam Leffler 54125591b213SSam Leffler /* 54135591b213SSam Leffler * Change channels and update the h/w rate map 54145591b213SSam Leffler * if we're switching; e.g. 11a to 11b/g. 54155591b213SSam Leffler */ 5416c42a7b7eSSam Leffler ath_chan_change(sc, chan); 54170a915fadSSam Leffler 54180a915fadSSam Leffler /* 54192fd9aabbSAdrian Chadd * Reset clears the beacon timers; reset them 54202fd9aabbSAdrian Chadd * here if needed. 
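 * (TDMA vaps are reconfigured via ath_tdma_config(); everything else
 * goes through ath_beacon_config().)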
54212fd9aabbSAdrian Chadd */ 54222fd9aabbSAdrian Chadd if (sc->sc_beacons) { /* restart beacons */ 54232fd9aabbSAdrian Chadd #ifdef IEEE80211_SUPPORT_TDMA 54242fd9aabbSAdrian Chadd if (sc->sc_tdma) 54252fd9aabbSAdrian Chadd ath_tdma_config(sc, NULL); 54262fd9aabbSAdrian Chadd else 54272fd9aabbSAdrian Chadd #endif 54282fd9aabbSAdrian Chadd ath_beacon_config(sc, NULL); 54292fd9aabbSAdrian Chadd } 54302fd9aabbSAdrian Chadd 5431ef27340cSAdrian Chadd #if 0 54322fd9aabbSAdrian Chadd /* 54330a915fadSSam Leffler * Re-enable interrupts. 54340a915fadSSam Leffler */ 54350a915fadSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5436ef27340cSAdrian Chadd #endif 54375591b213SSam Leffler } 5438ef27340cSAdrian Chadd 5439ef27340cSAdrian Chadd finish: 5440ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 5441ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 5442ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 5443ef27340cSAdrian Chadd if (dointr) 5444ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 5445ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 5446ef27340cSAdrian Chadd 5447ef27340cSAdrian Chadd /* XXX do this inside of IF_LOCK? */ 5448ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5449ef27340cSAdrian Chadd ath_txrx_start(sc); 5450ef27340cSAdrian Chadd /* XXX ath_start? */ 5451ef27340cSAdrian Chadd 5452ef27340cSAdrian Chadd return ret; 54535591b213SSam Leffler } 54545591b213SSam Leffler 54555591b213SSam Leffler /* 54565591b213SSam Leffler * Periodically recalibrate the PHY to account 54575591b213SSam Leffler * for temperature/environment changes. 54585591b213SSam Leffler */ 54595591b213SSam Leffler static void 54605591b213SSam Leffler ath_calibrate(void *arg) 54615591b213SSam Leffler { 54625591b213SSam Leffler struct ath_softc *sc = arg; 54635591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 54642dc7fcc4SSam Leffler struct ifnet *ifp = sc->sc_ifp; 54658d91de92SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 54662dc7fcc4SSam Leffler HAL_BOOL longCal, isCalDone; 5467a108ab63SAdrian Chadd HAL_BOOL aniCal, shortCal = AH_FALSE; 54682dc7fcc4SSam Leffler int nextcal; 54695591b213SSam Leffler 54708d91de92SSam Leffler if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 54718d91de92SSam Leffler goto restart; 54722dc7fcc4SSam Leffler longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5473a108ab63SAdrian Chadd aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5474a108ab63SAdrian Chadd if (sc->sc_doresetcal) 5475a108ab63SAdrian Chadd shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5476a108ab63SAdrian Chadd 5477a108ab63SAdrian Chadd DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5478a108ab63SAdrian Chadd if (aniCal) { 5479a108ab63SAdrian Chadd sc->sc_stats.ast_ani_cal++; 5480a108ab63SAdrian Chadd sc->sc_lastani = ticks; 5481a108ab63SAdrian Chadd ath_hal_ani_poll(ah, sc->sc_curchan); 5482a108ab63SAdrian Chadd } 5483a108ab63SAdrian Chadd 54842dc7fcc4SSam Leffler if (longCal) { 54855591b213SSam Leffler sc->sc_stats.ast_per_cal++; 54868197f57eSAdrian Chadd sc->sc_lastlongcal = ticks; 54875591b213SSam Leffler if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 54885591b213SSam Leffler /* 54895591b213SSam Leffler * Rfgain is out of bounds, reset the chip 54905591b213SSam Leffler * to load new gain values. 
54915591b213SSam Leffler */ 5492370572d9SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5493370572d9SSam Leffler "%s: rfgain change\n", __func__); 54945591b213SSam Leffler sc->sc_stats.ast_per_rfgain++; 5495ef27340cSAdrian Chadd /* 5496ef27340cSAdrian Chadd * Drop lock - we can't hold it across the 5497ef27340cSAdrian Chadd * ath_reset() call. Instead, we'll drop 5498ef27340cSAdrian Chadd * out here, do a reset, then reschedule 5499ef27340cSAdrian Chadd * the callout. 5500ef27340cSAdrian Chadd */ 5501ef27340cSAdrian Chadd callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5502ef27340cSAdrian Chadd sc->sc_resetcal = 0; 5503ef27340cSAdrian Chadd sc->sc_doresetcal = AH_TRUE; 5504ef27340cSAdrian Chadd ATH_UNLOCK(sc); 5505517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 55060fbe75a1SAdrian Chadd ATH_LOCK(sc); 5507ef27340cSAdrian Chadd return; 55085591b213SSam Leffler } 55092dc7fcc4SSam Leffler /* 55102dc7fcc4SSam Leffler * If this long cal is after an idle period, then 55112dc7fcc4SSam Leffler * reset the data collection state so we start fresh. 55122dc7fcc4SSam Leffler */ 55132dc7fcc4SSam Leffler if (sc->sc_resetcal) { 551459efa8b5SSam Leffler (void) ath_hal_calreset(ah, sc->sc_curchan); 55152dc7fcc4SSam Leffler sc->sc_lastcalreset = ticks; 5516a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 55172dc7fcc4SSam Leffler sc->sc_resetcal = 0; 5518a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 55192dc7fcc4SSam Leffler } 55202dc7fcc4SSam Leffler } 5521a108ab63SAdrian Chadd 5522a108ab63SAdrian Chadd /* Only call if we're doing a short/long cal, not for ANI calibration */ 5523a108ab63SAdrian Chadd if (shortCal || longCal) { 552459efa8b5SSam Leffler if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 55252dc7fcc4SSam Leffler if (longCal) { 55262dc7fcc4SSam Leffler /* 55272dc7fcc4SSam Leffler * Calibrate noise floor data again in case of change. 55282dc7fcc4SSam Leffler */ 55292dc7fcc4SSam Leffler ath_hal_process_noisefloor(ah); 55302dc7fcc4SSam Leffler } 55312dc7fcc4SSam Leffler } else { 5532c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 5533c42a7b7eSSam Leffler "%s: calibration of channel %u failed\n", 553459efa8b5SSam Leffler __func__, sc->sc_curchan->ic_freq); 55355591b213SSam Leffler sc->sc_stats.ast_per_calfail++; 55365591b213SSam Leffler } 5537a108ab63SAdrian Chadd if (shortCal) 5538a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5539a108ab63SAdrian Chadd } 55402dc7fcc4SSam Leffler if (!isCalDone) { 55418d91de92SSam Leffler restart: 55427b0c77ecSSam Leffler /* 55432dc7fcc4SSam Leffler * Use a shorter interval to potentially collect multiple 55442dc7fcc4SSam Leffler * data samples required to complete calibration. Once 55452dc7fcc4SSam Leffler * we're told the work is done we drop back to a longer 55462dc7fcc4SSam Leffler * interval between requests. We're more aggressive doing 55472dc7fcc4SSam Leffler * work when operating as an AP to improve operation right 55482dc7fcc4SSam Leffler * after startup. 
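 * For example, with the usual 100ms short-cal default this means
 * polling every 100ms when operating as an AP and roughly every
 * second otherwise (governed by the ath_shortcalinterval tunable).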
55497b0c77ecSSam Leffler */ 5550a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 5551a108ab63SAdrian Chadd nextcal = ath_shortcalinterval*hz/1000; 55522dc7fcc4SSam Leffler if (sc->sc_opmode != HAL_M_HOSTAP) 55532dc7fcc4SSam Leffler nextcal *= 10; 5554a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 55552dc7fcc4SSam Leffler } else { 5556a108ab63SAdrian Chadd /* nextcal should be the shortest time for next event */ 55572dc7fcc4SSam Leffler nextcal = ath_longcalinterval*hz; 55582dc7fcc4SSam Leffler if (sc->sc_lastcalreset == 0) 55592dc7fcc4SSam Leffler sc->sc_lastcalreset = sc->sc_lastlongcal; 55602dc7fcc4SSam Leffler else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 55612dc7fcc4SSam Leffler sc->sc_resetcal = 1; /* setup reset next trip */ 5562a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 5563bd5a9920SSam Leffler } 5564a108ab63SAdrian Chadd /* ANI calibration may occur more often than short/long/resetcal */ 5565a108ab63SAdrian Chadd if (ath_anicalinterval > 0) 5566a108ab63SAdrian Chadd nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5567bd5a9920SSam Leffler 55682dc7fcc4SSam Leffler if (nextcal != 0) { 55692dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 55702dc7fcc4SSam Leffler __func__, nextcal, isCalDone ? "" : "!"); 55712dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 55722dc7fcc4SSam Leffler } else { 55732dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 55742dc7fcc4SSam Leffler __func__); 55752dc7fcc4SSam Leffler /* NB: don't rearm timer */ 55762dc7fcc4SSam Leffler } 55775591b213SSam Leffler } 55785591b213SSam Leffler 557968e8e04eSSam Leffler static void 558068e8e04eSSam Leffler ath_scan_start(struct ieee80211com *ic) 558168e8e04eSSam Leffler { 558268e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 558368e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 558468e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 558568e8e04eSSam Leffler u_int32_t rfilt; 558668e8e04eSSam Leffler 558768e8e04eSSam Leffler /* XXX calibration timer? 
*/ 558868e8e04eSSam Leffler 558968e8e04eSSam Leffler sc->sc_scanning = 1; 559068e8e04eSSam Leffler sc->sc_syncbeacon = 0; 559168e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 559268e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 559368e8e04eSSam Leffler ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 559468e8e04eSSam Leffler 559568e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 559668e8e04eSSam Leffler __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 559768e8e04eSSam Leffler } 559868e8e04eSSam Leffler 559968e8e04eSSam Leffler static void 560068e8e04eSSam Leffler ath_scan_end(struct ieee80211com *ic) 560168e8e04eSSam Leffler { 560268e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 560368e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 560468e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 560568e8e04eSSam Leffler u_int32_t rfilt; 560668e8e04eSSam Leffler 560768e8e04eSSam Leffler sc->sc_scanning = 0; 560868e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 560968e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 561068e8e04eSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 561168e8e04eSSam Leffler 561268e8e04eSSam Leffler ath_hal_process_noisefloor(ah); 561368e8e04eSSam Leffler 561468e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 561568e8e04eSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), 561668e8e04eSSam Leffler sc->sc_curaid); 561768e8e04eSSam Leffler } 561868e8e04eSSam Leffler 561968e8e04eSSam Leffler static void 562068e8e04eSSam Leffler ath_set_channel(struct ieee80211com *ic) 562168e8e04eSSam Leffler { 562268e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 562368e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 562468e8e04eSSam Leffler 562568e8e04eSSam Leffler (void) ath_chan_set(sc, ic->ic_curchan); 562668e8e04eSSam Leffler /* 562768e8e04eSSam Leffler * If we are returning to our bss channel then mark state 562868e8e04eSSam Leffler * so the next recv'd beacon's tsf will be used to sync the 562968e8e04eSSam Leffler * beacon timers. Note that since we only hear beacons in 563068e8e04eSSam Leffler * sta/ibss mode this has no effect in other operating modes. 563168e8e04eSSam Leffler */ 563268e8e04eSSam Leffler if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 563368e8e04eSSam Leffler sc->sc_syncbeacon = 1; 563468e8e04eSSam Leffler } 563568e8e04eSSam Leffler 5636b032f27cSSam Leffler /* 5637b032f27cSSam Leffler * Walk the vap list and check if there any vap's in RUN state. 
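 * Note the comparison is >= IEEE80211_S_RUN, so vaps sitting in CSA
 * or SLEEP state are treated as running too.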
5638b032f27cSSam Leffler */ 56395591b213SSam Leffler static int 5640b032f27cSSam Leffler ath_isanyrunningvaps(struct ieee80211vap *this) 56415591b213SSam Leffler { 5642b032f27cSSam Leffler struct ieee80211com *ic = this->iv_ic; 5643b032f27cSSam Leffler struct ieee80211vap *vap; 5644b032f27cSSam Leffler 5645b032f27cSSam Leffler IEEE80211_LOCK_ASSERT(ic); 5646b032f27cSSam Leffler 5647b032f27cSSam Leffler TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5648309a3e45SSam Leffler if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5649b032f27cSSam Leffler return 1; 5650b032f27cSSam Leffler } 5651b032f27cSSam Leffler return 0; 5652b032f27cSSam Leffler } 5653b032f27cSSam Leffler 5654b032f27cSSam Leffler static int 5655b032f27cSSam Leffler ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5656b032f27cSSam Leffler { 5657b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 5658b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5659b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 566045bbf62fSSam Leffler struct ath_hal *ah = sc->sc_ah; 5661b032f27cSSam Leffler struct ieee80211_node *ni = NULL; 566268e8e04eSSam Leffler int i, error, stamode; 56635591b213SSam Leffler u_int32_t rfilt; 5664f52efb6dSAdrian Chadd int csa_run_transition = 0; 56655591b213SSam Leffler static const HAL_LED_STATE leds[] = { 56665591b213SSam Leffler HAL_LED_INIT, /* IEEE80211_S_INIT */ 56675591b213SSam Leffler HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 56685591b213SSam Leffler HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 56695591b213SSam Leffler HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 567077d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CAC */ 56715591b213SSam Leffler HAL_LED_RUN, /* IEEE80211_S_RUN */ 567277d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CSA */ 567377d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 56745591b213SSam Leffler }; 56755591b213SSam Leffler 5676c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5677b032f27cSSam Leffler ieee80211_state_name[vap->iv_state], 5678c42a7b7eSSam Leffler ieee80211_state_name[nstate]); 56795591b213SSam Leffler 5680f52efb6dSAdrian Chadd if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5681f52efb6dSAdrian Chadd csa_run_transition = 1; 5682f52efb6dSAdrian Chadd 56832e986da5SSam Leffler callout_drain(&sc->sc_cal_ch); 56845591b213SSam Leffler ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 56855591b213SSam Leffler 5686b032f27cSSam Leffler if (nstate == IEEE80211_S_SCAN) { 568758769f58SSam Leffler /* 5688b032f27cSSam Leffler * Scanning: turn off beacon miss and don't beacon. 5689b032f27cSSam Leffler * Mark beacon state so when we reach RUN state we'll 5690b032f27cSSam Leffler * [re]setup beacons. Unblock the task q thread so 5691b032f27cSSam Leffler * deferred interrupt processing is done. 
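 * Both the live interrupt mask and the cached sc_imask drop SWBA and
 * BMISS here, so a later ath_hal_intrset() won't re-arm them before
 * RUN state rebuilds the beacon configuration.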
569258769f58SSam Leffler */ 5693b032f27cSSam Leffler ath_hal_intrset(ah, 5694b032f27cSSam Leffler sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 56955591b213SSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5696b032f27cSSam Leffler sc->sc_beacons = 0; 5697b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 56985591b213SSam Leffler } 56995591b213SSam Leffler 5700b032f27cSSam Leffler ni = vap->iv_bss; 570168e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 5702b032f27cSSam Leffler stamode = (vap->iv_opmode == IEEE80211_M_STA || 57037b916f89SSam Leffler vap->iv_opmode == IEEE80211_M_AHDEMO || 5704b032f27cSSam Leffler vap->iv_opmode == IEEE80211_M_IBSS); 570568e8e04eSSam Leffler if (stamode && nstate == IEEE80211_S_RUN) { 570668e8e04eSSam Leffler sc->sc_curaid = ni->ni_associd; 570768e8e04eSSam Leffler IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5708b032f27cSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5709b032f27cSSam Leffler } 571068e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5711b032f27cSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 571268e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 571368e8e04eSSam Leffler 5714b032f27cSSam Leffler /* XXX is this to restore keycache on resume? */ 5715b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_STA && 5716b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY)) { 57175591b213SSam Leffler for (i = 0; i < IEEE80211_WEP_NKID; i++) 57185591b213SSam Leffler if (ath_hal_keyisvalid(ah, i)) 571968e8e04eSSam Leffler ath_hal_keysetmac(ah, i, ni->ni_bssid); 57205591b213SSam Leffler } 5721b032f27cSSam Leffler 5722b032f27cSSam Leffler /* 5723b032f27cSSam Leffler * Invoke the parent method to do net80211 work. 5724b032f27cSSam Leffler */ 5725b032f27cSSam Leffler error = avp->av_newstate(vap, nstate, arg); 5726b032f27cSSam Leffler if (error != 0) 5727b032f27cSSam Leffler goto bad; 5728c42a7b7eSSam Leffler 572968e8e04eSSam Leffler if (nstate == IEEE80211_S_RUN) { 5730b032f27cSSam Leffler /* NB: collect bss node again, it may have changed */ 5731b032f27cSSam Leffler ni = vap->iv_bss; 57325591b213SSam Leffler 5733b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 5734b032f27cSSam Leffler "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5735b032f27cSSam Leffler "capinfo 0x%04x chan %d\n", __func__, 5736b032f27cSSam Leffler vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5737b032f27cSSam Leffler ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5738b032f27cSSam Leffler 5739b032f27cSSam Leffler switch (vap->iv_opmode) { 5740584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 574110ad9a77SSam Leffler case IEEE80211_M_AHDEMO: 574210ad9a77SSam Leffler if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 574310ad9a77SSam Leffler break; 574410ad9a77SSam Leffler /* fall thru... */ 574510ad9a77SSam Leffler #endif 5746e8fd88a3SSam Leffler case IEEE80211_M_HOSTAP: 5747e8fd88a3SSam Leffler case IEEE80211_M_IBSS: 574859aa14a9SRui Paulo case IEEE80211_M_MBSS: 57495591b213SSam Leffler /* 5750e8fd88a3SSam Leffler * Allocate and setup the beacon frame. 5751e8fd88a3SSam Leffler * 5752f818612bSSam Leffler * Stop any previous beacon DMA. This may be 5753f818612bSSam Leffler * necessary, for example, when an ibss merge 5754f818612bSSam Leffler * causes reconfiguration; there will be a state 5755f818612bSSam Leffler * transition from RUN->RUN that means we may 5756f818612bSSam Leffler * be called with beacon transmission active. 
5757f818612bSSam Leffler */ 5758f818612bSSam Leffler ath_hal_stoptxdma(ah, sc->sc_bhalq); 5759b032f27cSSam Leffler 57605591b213SSam Leffler error = ath_beacon_alloc(sc, ni); 57615591b213SSam Leffler if (error != 0) 57625591b213SSam Leffler goto bad; 57637a04dc27SSam Leffler /* 576480d939bfSSam Leffler * If joining an adhoc network defer beacon timer 576580d939bfSSam Leffler * configuration to the next beacon frame so we 576680d939bfSSam Leffler * have a current TSF to use. Otherwise we're 5767b032f27cSSam Leffler * starting an ibss/bss so there's no need to delay; 5768b032f27cSSam Leffler * if this is the first vap moving to RUN state, then 5769b032f27cSSam Leffler * beacon state needs to be [re]configured. 57707a04dc27SSam Leffler */ 5771b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_IBSS && 5772b032f27cSSam Leffler ni->ni_tstamp.tsf != 0) { 577380d939bfSSam Leffler sc->sc_syncbeacon = 1; 5774b032f27cSSam Leffler } else if (!sc->sc_beacons) { 5775584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 577610ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) 577710ad9a77SSam Leffler ath_tdma_config(sc, vap); 577810ad9a77SSam Leffler else 577910ad9a77SSam Leffler #endif 5780b032f27cSSam Leffler ath_beacon_config(sc, vap); 5781b032f27cSSam Leffler sc->sc_beacons = 1; 5782b032f27cSSam Leffler } 5783e8fd88a3SSam Leffler break; 5784e8fd88a3SSam Leffler case IEEE80211_M_STA: 5785e8fd88a3SSam Leffler /* 578680d939bfSSam Leffler * Defer beacon timer configuration to the next 578780d939bfSSam Leffler * beacon frame so we have a current TSF to use 578880d939bfSSam Leffler * (any TSF collected when scanning is likely old). 5789f52efb6dSAdrian Chadd * However if it's due to a CSA -> RUN transition, 5790f52efb6dSAdrian Chadd * force a beacon update so we pick up a lack of 5791f52efb6dSAdrian Chadd * beacons from an AP in CAC and thus force a 5792f52efb6dSAdrian Chadd * scan. 57937a04dc27SSam Leffler */ 579480d939bfSSam Leffler sc->sc_syncbeacon = 1; 5795f52efb6dSAdrian Chadd if (csa_run_transition) 5796f52efb6dSAdrian Chadd ath_beacon_config(sc, vap); 5797e8fd88a3SSam Leffler break; 5798b032f27cSSam Leffler case IEEE80211_M_MONITOR: 5799b032f27cSSam Leffler /* 5800b032f27cSSam Leffler * Monitor mode vaps have only INIT->RUN and RUN->RUN 5801b032f27cSSam Leffler * transitions so we must re-enable interrupts here to 5802b032f27cSSam Leffler * handle the case of a single monitor mode vap. 5803b032f27cSSam Leffler */ 5804b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 5805b032f27cSSam Leffler break; 5806b032f27cSSam Leffler case IEEE80211_M_WDS: 5807b032f27cSSam Leffler break; 5808e8fd88a3SSam Leffler default: 5809e8fd88a3SSam Leffler break; 58105591b213SSam Leffler } 58115591b213SSam Leffler /* 58127b0c77ecSSam Leffler * Let the hal process statistics collected during a 58137b0c77ecSSam Leffler * scan so it can provide calibrated noise floor data. 58147b0c77ecSSam Leffler */ 58157b0c77ecSSam Leffler ath_hal_process_noisefloor(ah); 58167b0c77ecSSam Leffler /* 5817ffa2cab6SSam Leffler * Reset rssi stats; maybe not the best place... 5818ffa2cab6SSam Leffler */ 5819ffa2cab6SSam Leffler sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5820ffa2cab6SSam Leffler sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5821ffa2cab6SSam Leffler sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 582245bbf62fSSam Leffler /* 5823b032f27cSSam Leffler * Finally, start any timers and the task q thread 5824b032f27cSSam Leffler * (in case we didn't go through SCAN state). 
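 * The calibration callout below is armed a single tick out, so the
 * first calibration pass happens almost immediately after entering RUN
 * state; it is skipped entirely when ath_longcalinterval is zero.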
582545bbf62fSSam Leffler */
58262dc7fcc4SSam Leffler if (ath_longcalinterval != 0) {
5827c42a7b7eSSam Leffler /* start periodic recalibration timer */
58282dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
58292dc7fcc4SSam Leffler } else {
58302dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE,
58312dc7fcc4SSam Leffler "%s: calibration disabled\n", __func__);
5832c42a7b7eSSam Leffler }
5833b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq);
5834b032f27cSSam Leffler } else if (nstate == IEEE80211_S_INIT) {
5835b032f27cSSam Leffler /*
5836b032f27cSSam Leffler * If there are no vaps left in RUN state then
5837b032f27cSSam Leffler * shutdown host/driver operation:
5838b032f27cSSam Leffler * o disable interrupts
5839b032f27cSSam Leffler * o disable the task queue thread
5840b032f27cSSam Leffler * o mark beacon processing as stopped
5841b032f27cSSam Leffler */
5842b032f27cSSam Leffler if (!ath_isanyrunningvaps(vap)) {
5843b032f27cSSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5844b032f27cSSam Leffler /* disable interrupts */
5845b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5846b032f27cSSam Leffler taskqueue_block(sc->sc_tq);
5847b032f27cSSam Leffler sc->sc_beacons = 0;
5848b032f27cSSam Leffler }
5849584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA
585010ad9a77SSam Leffler ath_hal_setcca(ah, AH_TRUE);
585110ad9a77SSam Leffler #endif
5852b032f27cSSam Leffler }
58535591b213SSam Leffler bad:
58545591b213SSam Leffler return error;
58555591b213SSam Leffler }
58565591b213SSam Leffler
58575591b213SSam Leffler /*
5858e8fd88a3SSam Leffler * Allocate a key cache slot to the station so we can
5859e8fd88a3SSam Leffler * setup a mapping from key index to node. The key cache
5860e8fd88a3SSam Leffler * slot is needed for managing antenna state and for
5861e8fd88a3SSam Leffler * compression when stations do not use crypto. We do
5862e8fd88a3SSam Leffler * it unilaterally here; if crypto is employed this slot
5863e8fd88a3SSam Leffler * will be reassigned.
5864e8fd88a3SSam Leffler */
5865e8fd88a3SSam Leffler static void
5866e8fd88a3SSam Leffler ath_setup_stationkey(struct ieee80211_node *ni)
5867e8fd88a3SSam Leffler {
5868b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap;
5869b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5870c1225b52SSam Leffler ieee80211_keyix keyix, rxkeyix;
5871e8fd88a3SSam Leffler
5872b032f27cSSam Leffler if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5873e8fd88a3SSam Leffler /*
5874e8fd88a3SSam Leffler * Key cache is full; we'll fall back to doing
5875e8fd88a3SSam Leffler * the more expensive lookup in software. Note
5876e8fd88a3SSam Leffler * this also means no h/w compression.
5877e8fd88a3SSam Leffler */
5878e8fd88a3SSam Leffler /* XXX msg+statistic */
5879e8fd88a3SSam Leffler } else {
5880c1225b52SSam Leffler /* XXX locking?
*/ 5881e8fd88a3SSam Leffler ni->ni_ucastkey.wk_keyix = keyix; 5882c1225b52SSam Leffler ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 588333052833SSam Leffler /* NB: must mark device key to get called back on delete */ 588433052833SSam Leffler ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 5885d3ac945bSSam Leffler IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5886e8fd88a3SSam Leffler /* NB: this will create a pass-thru key entry */ 588755c7b877SAdrian Chadd ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 5888e8fd88a3SSam Leffler } 5889e8fd88a3SSam Leffler } 5890e8fd88a3SSam Leffler 5891e8fd88a3SSam Leffler /* 58925591b213SSam Leffler * Setup driver-specific state for a newly associated node. 58935591b213SSam Leffler * Note that we're called also on a re-associate, the isnew 58945591b213SSam Leffler * param tells us if this is the first time or not. 58955591b213SSam Leffler */ 58965591b213SSam Leffler static void 5897e9962332SSam Leffler ath_newassoc(struct ieee80211_node *ni, int isnew) 58985591b213SSam Leffler { 5899b032f27cSSam Leffler struct ath_node *an = ATH_NODE(ni); 5900b032f27cSSam Leffler struct ieee80211vap *vap = ni->ni_vap; 5901b032f27cSSam Leffler struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 5902c62362cbSSam Leffler const struct ieee80211_txparam *tp = ni->ni_txparms; 59035591b213SSam Leffler 5904ab06fdf2SSam Leffler an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 5905ab06fdf2SSam Leffler an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 5906b032f27cSSam Leffler 5907b032f27cSSam Leffler ath_rate_newassoc(sc, an, isnew); 5908e8fd88a3SSam Leffler if (isnew && 5909b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 5910b032f27cSSam Leffler ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 5911e8fd88a3SSam Leffler ath_setup_stationkey(ni); 5912e8fd88a3SSam Leffler } 59135591b213SSam Leffler 59145591b213SSam Leffler static int 591559efa8b5SSam Leffler ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 5916b032f27cSSam Leffler int nchans, struct ieee80211_channel chans[]) 5917b032f27cSSam Leffler { 5918b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5919b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 592059efa8b5SSam Leffler HAL_STATUS status; 5921b032f27cSSam Leffler 5922033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 592359efa8b5SSam Leffler "%s: rd %u cc %u location %c%s\n", 592459efa8b5SSam Leffler __func__, reg->regdomain, reg->country, reg->location, 592559efa8b5SSam Leffler reg->ecm ? 
" ecm" : ""); 5926033022a9SSam Leffler 592759efa8b5SSam Leffler status = ath_hal_set_channels(ah, chans, nchans, 592859efa8b5SSam Leffler reg->country, reg->regdomain); 592959efa8b5SSam Leffler if (status != HAL_OK) { 593059efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 593159efa8b5SSam Leffler __func__, status); 593259efa8b5SSam Leffler return EINVAL; /* XXX */ 5933b032f27cSSam Leffler } 59348db87e40SAdrian Chadd 5935b032f27cSSam Leffler return 0; 5936b032f27cSSam Leffler } 5937b032f27cSSam Leffler 5938b032f27cSSam Leffler static void 5939b032f27cSSam Leffler ath_getradiocaps(struct ieee80211com *ic, 59405fe9f044SSam Leffler int maxchans, int *nchans, struct ieee80211_channel chans[]) 5941b032f27cSSam Leffler { 5942b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 5943b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 5944b032f27cSSam Leffler 594559efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 594659efa8b5SSam Leffler __func__, SKU_DEBUG, CTRY_DEFAULT); 5947033022a9SSam Leffler 594859efa8b5SSam Leffler /* XXX check return */ 594959efa8b5SSam Leffler (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 595059efa8b5SSam Leffler HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 5951033022a9SSam Leffler 5952b032f27cSSam Leffler } 5953b032f27cSSam Leffler 5954b032f27cSSam Leffler static int 5955b032f27cSSam Leffler ath_getchannels(struct ath_softc *sc) 5956b032f27cSSam Leffler { 5957b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 5958b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 5959b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 596059efa8b5SSam Leffler HAL_STATUS status; 5961b032f27cSSam Leffler 5962b032f27cSSam Leffler /* 596359efa8b5SSam Leffler * Collect channel set based on EEPROM contents. 5964b032f27cSSam Leffler */ 596559efa8b5SSam Leffler status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 596659efa8b5SSam Leffler &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 596759efa8b5SSam Leffler if (status != HAL_OK) { 596859efa8b5SSam Leffler if_printf(ifp, "%s: unable to collect channel list from hal, " 596959efa8b5SSam Leffler "status %d\n", __func__, status); 597059efa8b5SSam Leffler return EINVAL; 597159efa8b5SSam Leffler } 5972ca876918SSam Leffler (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 5973ca876918SSam Leffler ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 597459efa8b5SSam Leffler /* XXX map Atheros sku's to net80211 SKU's */ 597559efa8b5SSam Leffler /* XXX net80211 types too small */ 597659efa8b5SSam Leffler ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 597759efa8b5SSam Leffler ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 597859efa8b5SSam Leffler ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 597959efa8b5SSam Leffler ic->ic_regdomain.isocc[1] = ' '; 598059efa8b5SSam Leffler 5981b032f27cSSam Leffler ic->ic_regdomain.ecm = 1; 5982b032f27cSSam Leffler ic->ic_regdomain.location = 'I'; 5983033022a9SSam Leffler 5984033022a9SSam Leffler DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 598559efa8b5SSam Leffler "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 5986033022a9SSam Leffler __func__, sc->sc_eerd, sc->sc_eecc, 5987033022a9SSam Leffler ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 598859efa8b5SSam Leffler ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 59895591b213SSam Leffler return 0; 59905591b213SSam Leffler } 59915591b213SSam Leffler 59926c4612b9SSam Leffler static int 59936c4612b9SSam Leffler ath_rate_setup(struct ath_softc *sc, u_int mode) 59946c4612b9SSam Leffler { 59956c4612b9SSam Leffler struct ath_hal *ah = sc->sc_ah; 59966c4612b9SSam Leffler const HAL_RATE_TABLE *rt; 59976c4612b9SSam Leffler 59986c4612b9SSam Leffler switch (mode) { 59996c4612b9SSam Leffler case IEEE80211_MODE_11A: 60006c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A); 60016c4612b9SSam Leffler break; 6002724c193aSSam Leffler case IEEE80211_MODE_HALF: 6003aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6004aaa70f2fSSam Leffler break; 6005724c193aSSam Leffler case IEEE80211_MODE_QUARTER: 6006aaa70f2fSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6007aaa70f2fSSam Leffler break; 60086c4612b9SSam Leffler case IEEE80211_MODE_11B: 60096c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11B); 60106c4612b9SSam Leffler break; 60116c4612b9SSam Leffler case IEEE80211_MODE_11G: 60126c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11G); 60136c4612b9SSam Leffler break; 60146c4612b9SSam Leffler case IEEE80211_MODE_TURBO_A: 601568e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108A); 60166c4612b9SSam Leffler break; 60176c4612b9SSam Leffler case IEEE80211_MODE_TURBO_G: 60186c4612b9SSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_108G); 60196c4612b9SSam Leffler break; 602068e8e04eSSam Leffler case IEEE80211_MODE_STURBO_A: 602168e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 602268e8e04eSSam Leffler break; 602368e8e04eSSam Leffler case IEEE80211_MODE_11NA: 602468e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 602568e8e04eSSam Leffler break; 602668e8e04eSSam Leffler case IEEE80211_MODE_11NG: 602768e8e04eSSam Leffler rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 602868e8e04eSSam Leffler break; 60296c4612b9SSam Leffler default: 60306c4612b9SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 60316c4612b9SSam Leffler __func__, mode); 60326c4612b9SSam Leffler return 0; 60336c4612b9SSam Leffler } 60346c4612b9SSam Leffler sc->sc_rates[mode] = rt; 6035aaa70f2fSSam Leffler return (rt != NULL); 60365591b213SSam Leffler } 60375591b213SSam Leffler 60385591b213SSam Leffler static void 60395591b213SSam Leffler ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 60405591b213SSam Leffler { 60413e50ec2cSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 60423e50ec2cSSam Leffler /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 60433e50ec2cSSam Leffler static const struct { 60443e50ec2cSSam Leffler u_int rate; /* tx/rx 802.11 rate */ 60453e50ec2cSSam Leffler u_int16_t timeOn; /* LED on time (ms) */ 60463e50ec2cSSam Leffler u_int16_t timeOff; /* LED off time (ms) */ 60473e50ec2cSSam Leffler } blinkrates[] = { 60483e50ec2cSSam Leffler { 108, 40, 10 }, 60493e50ec2cSSam Leffler { 96, 44, 11 }, 60503e50ec2cSSam Leffler { 72, 50, 13 }, 60513e50ec2cSSam Leffler { 48, 57, 14 }, 60523e50ec2cSSam Leffler { 36, 67, 16 }, 60533e50ec2cSSam Leffler { 24, 80, 20 }, 60543e50ec2cSSam Leffler { 22, 100, 25 }, 60553e50ec2cSSam Leffler { 18, 133, 34 }, 60563e50ec2cSSam Leffler { 12, 160, 40 }, 60573e50ec2cSSam Leffler { 10, 200, 50 }, 60583e50ec2cSSam Leffler { 6, 240, 58 }, 60593e50ec2cSSam Leffler { 4, 267, 66 }, 60603e50ec2cSSam Leffler { 2, 400, 100 }, 60613e50ec2cSSam Leffler { 0, 500, 130 }, 6062724c193aSSam 
Leffler /* XXX half/quarter rates */
60633e50ec2cSSam Leffler };
60645591b213SSam Leffler const HAL_RATE_TABLE *rt;
60653e50ec2cSSam Leffler int i, j;
60665591b213SSam Leffler
60675591b213SSam Leffler memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
60685591b213SSam Leffler rt = sc->sc_rates[mode];
60695591b213SSam Leffler KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6070180f268dSSam Leffler for (i = 0; i < rt->rateCount; i++) {
6071180f268dSSam Leffler uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6072180f268dSSam Leffler if (rt->info[i].phy != IEEE80211_T_HT)
6073180f268dSSam Leffler sc->sc_rixmap[ieeerate] = i;
6074180f268dSSam Leffler else
6075180f268dSSam Leffler sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6076180f268dSSam Leffler }
60771b1a8e41SSam Leffler memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
607846d4d74cSSam Leffler for (i = 0; i < N(sc->sc_hwmap); i++) {
607946d4d74cSSam Leffler if (i >= rt->rateCount) {
60803e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
60813e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
608216b4851aSSam Leffler continue;
60833e50ec2cSSam Leffler }
60843e50ec2cSSam Leffler sc->sc_hwmap[i].ieeerate =
608546d4d74cSSam Leffler rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
608646d4d74cSSam Leffler if (rt->info[i].phy == IEEE80211_T_HT)
608726041a14SSam Leffler sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6088d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
608946d4d74cSSam Leffler if (rt->info[i].shortPreamble ||
609046d4d74cSSam Leffler rt->info[i].phy == IEEE80211_T_OFDM)
6091d3be6f5bSSam Leffler sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
60925463c4a4SSam Leffler sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
60933e50ec2cSSam Leffler for (j = 0; j < N(blinkrates)-1; j++)
60943e50ec2cSSam Leffler if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
60953e50ec2cSSam Leffler break;
60963e50ec2cSSam Leffler /* NB: this uses the last entry if the rate isn't found */
60973e50ec2cSSam Leffler /* XXX beware of overflow */
60983e50ec2cSSam Leffler sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
60993e50ec2cSSam Leffler sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6100c42a7b7eSSam Leffler }
61015591b213SSam Leffler sc->sc_currates = rt;
61025591b213SSam Leffler sc->sc_curmode = mode;
61035591b213SSam Leffler /*
6104c42a7b7eSSam Leffler * All protection frames are transmitted at 2Mb/s for
6105c42a7b7eSSam Leffler * 11g, otherwise at 1Mb/s.
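 * (ath_tx_findrix() takes an 802.11 rate in 500kb/s units, so the 2*2
 * below selects the 2Mb/s rate index and 2*1 the 1Mb/s index.)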
61065591b213SSam Leffler */
6107913a1ba1SSam Leffler if (mode == IEEE80211_MODE_11G)
6108ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6109913a1ba1SSam Leffler else
6110ab06fdf2SSam Leffler sc->sc_protrix = ath_tx_findrix(sc, 2*1);
61114fa8d4efSDaniel Eischen /* NB: caller is responsible for resetting rate control state */
61123e50ec2cSSam Leffler #undef N
61135591b213SSam Leffler }
61145591b213SSam Leffler
6115c42a7b7eSSam Leffler static void
61162e986da5SSam Leffler ath_watchdog(void *arg)
6117c42a7b7eSSam Leffler {
61182e986da5SSam Leffler struct ath_softc *sc = arg;
6119ef27340cSAdrian Chadd int do_reset = 0;
6120c42a7b7eSSam Leffler
61212e986da5SSam Leffler if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
61222e986da5SSam Leffler struct ifnet *ifp = sc->sc_ifp;
6123459bc4f0SSam Leffler uint32_t hangs;
6124459bc4f0SSam Leffler
6125459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6126459bc4f0SSam Leffler hangs != 0) {
6127459bc4f0SSam Leffler if_printf(ifp, "%s hang detected (0x%x)\n",
6128459bc4f0SSam Leffler hangs & 0xff ? "bb" : "mac", hangs);
6129459bc4f0SSam Leffler } else
6130c42a7b7eSSam Leffler if_printf(ifp, "device timeout\n");
6131ef27340cSAdrian Chadd do_reset = 1;
6132c42a7b7eSSam Leffler ifp->if_oerrors++;
6133c42a7b7eSSam Leffler sc->sc_stats.ast_watchdog++;
6134c42a7b7eSSam Leffler }
6135ef27340cSAdrian Chadd
6136ef27340cSAdrian Chadd /*
6137ef27340cSAdrian Chadd * We can't hold the lock across the ath_reset() call.
6138ef27340cSAdrian Chadd */
6139ef27340cSAdrian Chadd if (do_reset) {
6140ef27340cSAdrian Chadd ATH_UNLOCK(sc);
6141ef27340cSAdrian Chadd ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
6142ef27340cSAdrian Chadd ATH_LOCK(sc);
6143ef27340cSAdrian Chadd }
6144ef27340cSAdrian Chadd
61452e986da5SSam Leffler callout_schedule(&sc->sc_wd_ch, hz);
6146c42a7b7eSSam Leffler }
6147c42a7b7eSSam Leffler
6148a585a9a1SSam Leffler #ifdef ATH_DIAGAPI
6149c42a7b7eSSam Leffler /*
6150c42a7b7eSSam Leffler * Diagnostic interface to the HAL. This is used by various
6151c42a7b7eSSam Leffler * tools to do things like retrieve register contents for
6152c42a7b7eSSam Leffler * debugging. The mechanism is intentionally opaque so that
6153c42a7b7eSSam Leffler * it can change frequently w/o concern for compatibility.
6154c42a7b7eSSam Leffler */
6155c42a7b7eSSam Leffler static int
6156c42a7b7eSSam Leffler ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6157c42a7b7eSSam Leffler {
6158c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah;
6159c42a7b7eSSam Leffler u_int id = ad->ad_id & ATH_DIAG_ID;
6160c42a7b7eSSam Leffler void *indata = NULL;
6161c42a7b7eSSam Leffler void *outdata = NULL;
6162c42a7b7eSSam Leffler u_int32_t insize = ad->ad_in_size;
6163c42a7b7eSSam Leffler u_int32_t outsize = ad->ad_out_size;
6164c42a7b7eSSam Leffler int error = 0;
6165c42a7b7eSSam Leffler
6166c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_IN) {
6167c42a7b7eSSam Leffler /*
6168c42a7b7eSSam Leffler * Copy in data.
6169c42a7b7eSSam Leffler */ 6170c42a7b7eSSam Leffler indata = malloc(insize, M_TEMP, M_NOWAIT); 6171c42a7b7eSSam Leffler if (indata == NULL) { 6172c42a7b7eSSam Leffler error = ENOMEM; 6173c42a7b7eSSam Leffler goto bad; 6174c42a7b7eSSam Leffler } 6175c42a7b7eSSam Leffler error = copyin(ad->ad_in_data, indata, insize); 6176c42a7b7eSSam Leffler if (error) 6177c42a7b7eSSam Leffler goto bad; 6178c42a7b7eSSam Leffler } 6179c42a7b7eSSam Leffler if (ad->ad_id & ATH_DIAG_DYN) { 6180c42a7b7eSSam Leffler /* 6181c42a7b7eSSam Leffler * Allocate a buffer for the results (otherwise the HAL 6182c42a7b7eSSam Leffler * returns a pointer to a buffer where we can read the 6183c42a7b7eSSam Leffler * results). Note that we depend on the HAL leaving this 6184c42a7b7eSSam Leffler * pointer for us to use below in reclaiming the buffer; 6185c42a7b7eSSam Leffler * may want to be more defensive. 6186c42a7b7eSSam Leffler */ 6187c42a7b7eSSam Leffler outdata = malloc(outsize, M_TEMP, M_NOWAIT); 6188c42a7b7eSSam Leffler if (outdata == NULL) { 6189c42a7b7eSSam Leffler error = ENOMEM; 6190c42a7b7eSSam Leffler goto bad; 6191c42a7b7eSSam Leffler } 6192c42a7b7eSSam Leffler } 6193c42a7b7eSSam Leffler if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6194c42a7b7eSSam Leffler if (outsize < ad->ad_out_size) 6195c42a7b7eSSam Leffler ad->ad_out_size = outsize; 6196c42a7b7eSSam Leffler if (outdata != NULL) 6197c42a7b7eSSam Leffler error = copyout(outdata, ad->ad_out_data, 6198c42a7b7eSSam Leffler ad->ad_out_size); 6199c42a7b7eSSam Leffler } else { 6200c42a7b7eSSam Leffler error = EINVAL; 6201c42a7b7eSSam Leffler } 6202c42a7b7eSSam Leffler bad: 6203c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6204c42a7b7eSSam Leffler free(indata, M_TEMP); 6205c42a7b7eSSam Leffler if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6206c42a7b7eSSam Leffler free(outdata, M_TEMP); 6207c42a7b7eSSam Leffler return error; 6208c42a7b7eSSam Leffler } 6209a585a9a1SSam Leffler #endif /* ATH_DIAGAPI */ 6210c42a7b7eSSam Leffler 6211c42a7b7eSSam Leffler static int 6212c42a7b7eSSam Leffler ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 6213c42a7b7eSSam Leffler { 6214c42a7b7eSSam Leffler #define IS_RUNNING(ifp) \ 621513f4c340SRobert Watson ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 6216c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 6217b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 6218c42a7b7eSSam Leffler struct ifreq *ifr = (struct ifreq *)data; 621984784be1SSam Leffler const HAL_RATE_TABLE *rt; 6220c42a7b7eSSam Leffler int error = 0; 6221c42a7b7eSSam Leffler 6222c42a7b7eSSam Leffler switch (cmd) { 6223c42a7b7eSSam Leffler case SIOCSIFFLAGS: 622431a8c1edSAndrew Thompson ATH_LOCK(sc); 6225c42a7b7eSSam Leffler if (IS_RUNNING(ifp)) { 6226c42a7b7eSSam Leffler /* 6227c42a7b7eSSam Leffler * To avoid rescanning another access point, 6228c42a7b7eSSam Leffler * do not call ath_init() here. Instead, 6229c42a7b7eSSam Leffler * only reflect promisc mode settings. 6230c42a7b7eSSam Leffler */ 6231c42a7b7eSSam Leffler ath_mode_init(sc); 6232c42a7b7eSSam Leffler } else if (ifp->if_flags & IFF_UP) { 6233c42a7b7eSSam Leffler /* 6234c42a7b7eSSam Leffler * Beware of being called during attach/detach 6235c42a7b7eSSam Leffler * to reset promiscuous mode. In that case we 6236c42a7b7eSSam Leffler * will still be marked UP but not RUNNING. 
6237c42a7b7eSSam Leffler * However trying to re-init the interface 6238c42a7b7eSSam Leffler * is the wrong thing to do as we've already 6239c42a7b7eSSam Leffler * torn down much of our state. There's 6240c42a7b7eSSam Leffler * probably a better way to deal with this. 6241c42a7b7eSSam Leffler */ 6242b032f27cSSam Leffler if (!sc->sc_invalid) 6243fc74a9f9SBrooks Davis ath_init(sc); /* XXX lose error */ 6244d3ac945bSSam Leffler } else { 6245c42a7b7eSSam Leffler ath_stop_locked(ifp); 6246d3ac945bSSam Leffler #ifdef notyet 6247d3ac945bSSam Leffler /* XXX must wakeup in places like ath_vap_delete */ 6248d3ac945bSSam Leffler if (!sc->sc_invalid) 6249d3ac945bSSam Leffler ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 6250d3ac945bSSam Leffler #endif 6251d3ac945bSSam Leffler } 625231a8c1edSAndrew Thompson ATH_UNLOCK(sc); 6253c42a7b7eSSam Leffler break; 6254b032f27cSSam Leffler case SIOCGIFMEDIA: 6255b032f27cSSam Leffler case SIOCSIFMEDIA: 6256b032f27cSSam Leffler error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6257b032f27cSSam Leffler break; 6258c42a7b7eSSam Leffler case SIOCGATHSTATS: 6259c42a7b7eSSam Leffler /* NB: embed these numbers to get a consistent view */ 6260c42a7b7eSSam Leffler sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6261c42a7b7eSSam Leffler sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 626284784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 626384784be1SSam Leffler sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6264584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 626510ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 626610ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 626710ad9a77SSam Leffler #endif 626884784be1SSam Leffler rt = sc->sc_currates; 626946d4d74cSSam Leffler sc->sc_stats.ast_tx_rate = 627046d4d74cSSam Leffler rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 62716aa113fdSAdrian Chadd if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 62726aa113fdSAdrian Chadd sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6273c42a7b7eSSam Leffler return copyout(&sc->sc_stats, 6274c42a7b7eSSam Leffler ifr->ifr_data, sizeof (sc->sc_stats)); 62753fc21fedSSam Leffler case SIOCZATHSTATS: 62763fc21fedSSam Leffler error = priv_check(curthread, PRIV_DRIVER); 62773fc21fedSSam Leffler if (error == 0) 62783fc21fedSSam Leffler memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 62793fc21fedSSam Leffler break; 6280a585a9a1SSam Leffler #ifdef ATH_DIAGAPI 6281c42a7b7eSSam Leffler case SIOCGATHDIAG: 6282c42a7b7eSSam Leffler error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6283c42a7b7eSSam Leffler break; 6284f51c84eaSAdrian Chadd case SIOCGATHPHYERR: 6285f51c84eaSAdrian Chadd error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6286f51c84eaSAdrian Chadd break; 6287a585a9a1SSam Leffler #endif 628831a8c1edSAndrew Thompson case SIOCGIFADDR: 6289b032f27cSSam Leffler error = ether_ioctl(ifp, cmd, data); 6290c42a7b7eSSam Leffler break; 629131a8c1edSAndrew Thompson default: 629231a8c1edSAndrew Thompson error = EINVAL; 629331a8c1edSAndrew Thompson break; 6294c42a7b7eSSam Leffler } 6295c42a7b7eSSam Leffler return error; 6296a614e076SSam Leffler #undef IS_RUNNING 6297c42a7b7eSSam Leffler } 6298c42a7b7eSSam Leffler 6299c42a7b7eSSam Leffler /* 6300c42a7b7eSSam Leffler * Announce various information on device/driver attach. 
6301c42a7b7eSSam Leffler */ 6302c42a7b7eSSam Leffler static void 6303c42a7b7eSSam Leffler ath_announce(struct ath_softc *sc) 6304c42a7b7eSSam Leffler { 6305fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 6306c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 6307c42a7b7eSSam Leffler 6308498657cfSSam Leffler if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6309498657cfSSam Leffler ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6310498657cfSSam Leffler ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 631146a924c4SAdrian Chadd if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 631246a924c4SAdrian Chadd ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6313c42a7b7eSSam Leffler if (bootverbose) { 6314c42a7b7eSSam Leffler int i; 6315c42a7b7eSSam Leffler for (i = 0; i <= WME_AC_VO; i++) { 6316c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[i]; 6317c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for %s traffic\n", 6318c42a7b7eSSam Leffler txq->axq_qnum, ieee80211_wme_acnames[i]); 6319c42a7b7eSSam Leffler } 6320c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6321c42a7b7eSSam Leffler sc->sc_cabq->axq_qnum); 6322c42a7b7eSSam Leffler if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6323c42a7b7eSSam Leffler } 6324e2d787faSSam Leffler if (ath_rxbuf != ATH_RXBUF) 6325e2d787faSSam Leffler if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6326e2d787faSSam Leffler if (ath_txbuf != ATH_TXBUF) 6327e2d787faSSam Leffler if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 63289ac01d39SRui Paulo if (sc->sc_mcastkey && bootverbose) 63299ac01d39SRui Paulo if_printf(ifp, "using multicast key search\n"); 6330c42a7b7eSSam Leffler } 633110ad9a77SSam Leffler 6332584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 633310ad9a77SSam Leffler static void 633410ad9a77SSam Leffler ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) 633510ad9a77SSam Leffler { 633610ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 633710ad9a77SSam Leffler HAL_BEACON_TIMERS bt; 633810ad9a77SSam Leffler 633910ad9a77SSam Leffler bt.bt_intval = bintval | HAL_BEACON_ENA; 634010ad9a77SSam Leffler bt.bt_nexttbtt = nexttbtt; 634110ad9a77SSam Leffler bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; 634210ad9a77SSam Leffler bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; 634310ad9a77SSam Leffler bt.bt_nextatim = nexttbtt+1; 6344f3fb1687SAdrian Chadd /* Enables TBTT, DBA, SWBA timers by default */ 6345f3fb1687SAdrian Chadd bt.bt_flags = 0; 634610ad9a77SSam Leffler ath_hal_beaconsettimers(ah, &bt); 634710ad9a77SSam Leffler } 634810ad9a77SSam Leffler 634910ad9a77SSam Leffler /* 635010ad9a77SSam Leffler * Calculate the beacon interval. This is periodic in the 635110ad9a77SSam Leffler * superframe for the bss. We assume each station is configured 635210ad9a77SSam Leffler * identically wrt transmit rate so the guard time we calculate 635310ad9a77SSam Leffler * above will be the same on all stations. Note we need to 635410ad9a77SSam Leffler * factor in the xmit time because the hardware will schedule 635510ad9a77SSam Leffler * a frame for transmit if the start of the frame is within 635610ad9a77SSam Leffler * the burst time. When we get hardware that properly kills 635710ad9a77SSam Leffler * frames in the PCU we can reduce/eliminate the guard time. 635810ad9a77SSam Leffler * 635910ad9a77SSam Leffler * Roundup to 1024 is so we have 1 TU buffer in the guard time 636010ad9a77SSam Leffler * to deal with the granularity of the nexttbtt timer. 
11n MAC's 636110ad9a77SSam Leffler * with 1us timer granularity should allow us to reduce/eliminate 636210ad9a77SSam Leffler * this. 636310ad9a77SSam Leffler */ 636410ad9a77SSam Leffler static void 636510ad9a77SSam Leffler ath_tdma_bintvalsetup(struct ath_softc *sc, 636610ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma) 636710ad9a77SSam Leffler { 636810ad9a77SSam Leffler /* copy from vap state (XXX check all vaps have same value?) */ 636910ad9a77SSam Leffler sc->sc_tdmaslotlen = tdma->tdma_slotlen; 637010ad9a77SSam Leffler 637110ad9a77SSam Leffler sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 637210ad9a77SSam Leffler tdma->tdma_slotcnt, 1024); 637310ad9a77SSam Leffler sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 637410ad9a77SSam Leffler if (sc->sc_tdmabintval & 1) 637510ad9a77SSam Leffler sc->sc_tdmabintval++; 637610ad9a77SSam Leffler 637710ad9a77SSam Leffler if (tdma->tdma_slot == 0) { 637810ad9a77SSam Leffler /* 637910ad9a77SSam Leffler * Only slot 0 beacons; other slots respond. 638010ad9a77SSam Leffler */ 638110ad9a77SSam Leffler sc->sc_imask |= HAL_INT_SWBA; 638210ad9a77SSam Leffler sc->sc_tdmaswba = 0; /* beacon immediately */ 638310ad9a77SSam Leffler } else { 638410ad9a77SSam Leffler /* XXX all vaps must be slot 0 or slot !0 */ 638510ad9a77SSam Leffler sc->sc_imask &= ~HAL_INT_SWBA; 638610ad9a77SSam Leffler } 638710ad9a77SSam Leffler } 638810ad9a77SSam Leffler 638910ad9a77SSam Leffler /* 639010ad9a77SSam Leffler * Max 802.11 overhead. This assumes no 4-address frames and 639110ad9a77SSam Leffler * the encapsulation done by ieee80211_encap (llc). We also 639210ad9a77SSam Leffler * include potential crypto overhead. 639310ad9a77SSam Leffler */ 639410ad9a77SSam Leffler #define IEEE80211_MAXOVERHEAD \ 639510ad9a77SSam Leffler (sizeof(struct ieee80211_qosframe) \ 639610ad9a77SSam Leffler + sizeof(struct llc) \ 639710ad9a77SSam Leffler + IEEE80211_ADDR_LEN \ 639810ad9a77SSam Leffler + IEEE80211_WEP_IVLEN \ 639910ad9a77SSam Leffler + IEEE80211_WEP_KIDLEN \ 640010ad9a77SSam Leffler + IEEE80211_WEP_CRCLEN \ 640110ad9a77SSam Leffler + IEEE80211_WEP_MICLEN \ 640210ad9a77SSam Leffler + IEEE80211_CRC_LEN) 640310ad9a77SSam Leffler 640410ad9a77SSam Leffler /* 640510ad9a77SSam Leffler * Setup initially for tdma operation. Start the beacon 640610ad9a77SSam Leffler * timers and enable SWBA if we are slot 0. Otherwise 640710ad9a77SSam Leffler * we wait for slot 0 to arrive so we can sync up before 640810ad9a77SSam Leffler * starting to transmit. 640910ad9a77SSam Leffler */ 641010ad9a77SSam Leffler static void 641110ad9a77SSam Leffler ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) 641210ad9a77SSam Leffler { 641310ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 641410ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 641510ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 641610ad9a77SSam Leffler const struct ieee80211_txparam *tp; 641710ad9a77SSam Leffler const struct ieee80211_tdma_state *tdma = NULL; 641810ad9a77SSam Leffler int rix; 641910ad9a77SSam Leffler 642010ad9a77SSam Leffler if (vap == NULL) { 642110ad9a77SSam Leffler vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 642210ad9a77SSam Leffler if (vap == NULL) { 642310ad9a77SSam Leffler if_printf(ifp, "%s: no vaps?\n", __func__); 642410ad9a77SSam Leffler return; 642510ad9a77SSam Leffler } 642610ad9a77SSam Leffler } 642710ad9a77SSam Leffler tp = vap->iv_bss->ni_txparms; 642810ad9a77SSam Leffler /* 642910ad9a77SSam Leffler * Calculate the guard time for each slot. 
This is the
643010ad9a77SSam Leffler * time to send a maximal-size frame according to the
643110ad9a77SSam Leffler * fixed/lowest transmit rate. Note that the interface
643210ad9a77SSam Leffler * mtu does not include the 802.11 overhead so we must
643310ad9a77SSam Leffler * tack that on (ath_hal_computetxtime includes the
643410ad9a77SSam Leffler * preamble and plcp in its calculation).
643510ad9a77SSam Leffler */
643610ad9a77SSam Leffler tdma = vap->iv_tdma;
643710ad9a77SSam Leffler if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
6438ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->ucastrate);
643910ad9a77SSam Leffler else
6440ab06fdf2SSam Leffler rix = ath_tx_findrix(sc, tp->mcastrate);
644110ad9a77SSam Leffler /* XXX short preamble assumed */
644210ad9a77SSam Leffler sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
644310ad9a77SSam Leffler ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
644410ad9a77SSam Leffler
644510ad9a77SSam Leffler ath_hal_intrset(ah, 0);
644610ad9a77SSam Leffler
644710ad9a77SSam Leffler ath_beaconq_config(sc); /* setup h/w beacon q */
64489c859a04SSam Leffler if (sc->sc_setcca)
644910ad9a77SSam Leffler ath_hal_setcca(ah, AH_FALSE); /* disable CCA */
645010ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */
645110ad9a77SSam Leffler ath_tdma_settimers(sc, sc->sc_tdmabintval,
645210ad9a77SSam Leffler sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
645310ad9a77SSam Leffler sc->sc_syncbeacon = 0;
645410ad9a77SSam Leffler
645510ad9a77SSam Leffler sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
645610ad9a77SSam Leffler sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
645710ad9a77SSam Leffler
645810ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask);
645910ad9a77SSam Leffler
646010ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
646110ad9a77SSam Leffler "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
646210ad9a77SSam Leffler tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
646310ad9a77SSam Leffler tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
646410ad9a77SSam Leffler sc->sc_tdmadbaprep);
646510ad9a77SSam Leffler }
646610ad9a77SSam Leffler
646710ad9a77SSam Leffler /*
646810ad9a77SSam Leffler * Update tdma operation. Called from the 802.11 layer
646910ad9a77SSam Leffler * when a beacon is received from the TDMA station operating
647010ad9a77SSam Leffler * in the slot immediately preceding us in the bss. Use
647110ad9a77SSam Leffler * the rx timestamp for the beacon frame to update our
647210ad9a77SSam Leffler * beacon timers so we follow their schedule. Note that
647310ad9a77SSam Leffler * by using the rx timestamp we implicitly include the
647410ad9a77SSam Leffler * propagation delay in our schedule.
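 * (A TU is 1024us; the TSF_TO_TU()/TU_TO_TSF() macros below are simply
 * 10-bit shifts of the TSF in the appropriate direction.)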
647510ad9a77SSam Leffler */ 647610ad9a77SSam Leffler static void 647710ad9a77SSam Leffler ath_tdma_update(struct ieee80211_node *ni, 64782bc3ce77SSam Leffler const struct ieee80211_tdma_param *tdma, int changed) 647910ad9a77SSam Leffler { 648010ad9a77SSam Leffler #define TSF_TO_TU(_h,_l) \ 648110ad9a77SSam Leffler ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 648210ad9a77SSam Leffler #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 648310ad9a77SSam Leffler struct ieee80211vap *vap = ni->ni_vap; 648410ad9a77SSam Leffler struct ieee80211com *ic = ni->ni_ic; 648510ad9a77SSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 648610ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah; 648710ad9a77SSam Leffler const HAL_RATE_TABLE *rt = sc->sc_currates; 6488fc4de9b7SAdrian Chadd u_int64_t tsf, rstamp, nextslot, nexttbtt; 6489fc4de9b7SAdrian Chadd u_int32_t txtime, nextslottu; 649010ad9a77SSam Leffler int32_t tudelta, tsfdelta; 649110ad9a77SSam Leffler const struct ath_rx_status *rs; 649210ad9a77SSam Leffler int rix; 649310ad9a77SSam Leffler 649410ad9a77SSam Leffler sc->sc_stats.ast_tdma_update++; 649510ad9a77SSam Leffler 649610ad9a77SSam Leffler /* 649710ad9a77SSam Leffler * Check for and adopt configuration changes. 649810ad9a77SSam Leffler */ 64992bc3ce77SSam Leffler if (changed != 0) { 650010ad9a77SSam Leffler const struct ieee80211_tdma_state *ts = vap->iv_tdma; 650110ad9a77SSam Leffler 650210ad9a77SSam Leffler ath_tdma_bintvalsetup(sc, ts); 6503040972a1SSam Leffler if (changed & TDMA_UPDATE_SLOTLEN) 6504040972a1SSam Leffler ath_wme_update(ic); 650510ad9a77SSam Leffler 650610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA, 650710ad9a77SSam Leffler "%s: adopt slot %u slotcnt %u slotlen %u us " 650810ad9a77SSam Leffler "bintval %u TU\n", __func__, 650910ad9a77SSam Leffler ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 651010ad9a77SSam Leffler sc->sc_tdmabintval); 651110ad9a77SSam Leffler 651210ad9a77SSam Leffler /* XXX right? */ 651310ad9a77SSam Leffler ath_hal_intrset(ah, sc->sc_imask); 651410ad9a77SSam Leffler /* NB: beacon timers programmed below */ 651510ad9a77SSam Leffler } 651610ad9a77SSam Leffler 651710ad9a77SSam Leffler /* extend rx timestamp to 64 bits */ 65185463c4a4SSam Leffler rs = sc->sc_lastrs; 651910ad9a77SSam Leffler tsf = ath_hal_gettsf64(ah); 6520fc4de9b7SAdrian Chadd rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 652110ad9a77SSam Leffler /* 652210ad9a77SSam Leffler * The rx timestamp is set by the hardware on completing 652310ad9a77SSam Leffler * reception (at the point where the rx descriptor is DMA'd 652410ad9a77SSam Leffler * to the host). To find the start of our next slot we 652510ad9a77SSam Leffler * must adjust this time by the time required to send 652610ad9a77SSam Leffler * the packet just received. 652710ad9a77SSam Leffler */ 652810ad9a77SSam Leffler rix = rt->rateCodeToIndex[rs->rs_rate]; 652910ad9a77SSam Leffler txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix, 653010ad9a77SSam Leffler rt->info[rix].shortPreamble); 653110ad9a77SSam Leffler /* NB: << 9 is to cvt to TU and /2 */ 653210ad9a77SSam Leffler nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); 653310ad9a77SSam Leffler nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD; 653410ad9a77SSam Leffler 653510ad9a77SSam Leffler /* 6536fc4de9b7SAdrian Chadd * Retrieve the hardware NextTBTT in usecs 6537fc4de9b7SAdrian Chadd * and calculate the difference between what the 653810ad9a77SSam Leffler * other station thinks and what we have programmed. 
This
653910ad9a77SSam Leffler * lets us figure how to adjust our timers to match. The
654010ad9a77SSam Leffler * adjustments are done by pulling the TSF forward and possibly
654110ad9a77SSam Leffler * rewriting the beacon timers.
654210ad9a77SSam Leffler */
6543fc4de9b7SAdrian Chadd nexttbtt = ath_hal_getnexttbtt(ah);
6544fc4de9b7SAdrian Chadd tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);
654510ad9a77SSam Leffler
654610ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
654710ad9a77SSam Leffler "tsfdelta %d avg +%d/-%d\n", tsfdelta,
654810ad9a77SSam Leffler TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
654910ad9a77SSam Leffler
655010ad9a77SSam Leffler if (tsfdelta < 0) {
655110ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
655210ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
655310ad9a77SSam Leffler tsfdelta = -tsfdelta % 1024;
655410ad9a77SSam Leffler nextslottu++;
655510ad9a77SSam Leffler } else if (tsfdelta > 0) {
655610ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
655710ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
655810ad9a77SSam Leffler tsfdelta = 1024 - (tsfdelta % 1024);
655910ad9a77SSam Leffler nextslottu++;
656010ad9a77SSam Leffler } else {
656110ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
656210ad9a77SSam Leffler TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
656310ad9a77SSam Leffler }
6564fc4de9b7SAdrian Chadd tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);
656510ad9a77SSam Leffler
656610ad9a77SSam Leffler /*
656710ad9a77SSam Leffler * Copy sender's timestamp into tdma ie so they can
656810ad9a77SSam Leffler * calculate roundtrip time. We submit a beacon frame
656910ad9a77SSam Leffler * below after any timer adjustment. The frame goes out
657010ad9a77SSam Leffler * at the next TBTT so the sender can calculate the
657110ad9a77SSam Leffler * roundtrip by inspecting the tdma ie in our beacon frame.
657210ad9a77SSam Leffler *
657310ad9a77SSam Leffler * NB: This tstamp is subtly preserved when
657410ad9a77SSam Leffler * IEEE80211_BEACON_TDMA is marked (e.g. when the
657510ad9a77SSam Leffler * slot position changes) because ieee80211_add_tdma
657610ad9a77SSam Leffler * skips over the data.
657710ad9a77SSam Leffler */
657810ad9a77SSam Leffler memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
657910ad9a77SSam Leffler __offsetof(struct ieee80211_tdma_param, tdma_tstamp),
658010ad9a77SSam Leffler &ni->ni_tstamp.data, 8);
658110ad9a77SSam Leffler #if 0
658210ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
6583fc4de9b7SAdrian Chadd "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
658410ad9a77SSam Leffler (unsigned long long) tsf, (unsigned long long) nextslot,
6585fc4de9b7SAdrian Chadd (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
658610ad9a77SSam Leffler #endif
658710ad9a77SSam Leffler /*
658810ad9a77SSam Leffler * Adjust the beacon timers only when pulling them forward
658910ad9a77SSam Leffler * or when going back by less than the beacon interval.
659010ad9a77SSam Leffler * Negative jumps larger than the beacon interval seem to
659110ad9a77SSam Leffler * cause the timers to stop and generally cause instability.
659210ad9a77SSam Leffler * This basically filters out jumps due to missed beacons.
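 * For example, with a 20 TU beacon interval a tudelta of +3 or -5 TU
 * is applied below, while a backwards jump of -25 TU is ignored.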
659310ad9a77SSam Leffler */
659410ad9a77SSam Leffler if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
659510ad9a77SSam Leffler ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
659610ad9a77SSam Leffler sc->sc_stats.ast_tdma_timers++;
659710ad9a77SSam Leffler }
659810ad9a77SSam Leffler if (tsfdelta > 0) {
659910ad9a77SSam Leffler ath_hal_adjusttsf(ah, tsfdelta);
660010ad9a77SSam Leffler sc->sc_stats.ast_tdma_tsf++;
660110ad9a77SSam Leffler }
660210ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap); /* prepare response */
660310ad9a77SSam Leffler #undef TU_TO_TSF
660410ad9a77SSam Leffler #undef TSF_TO_TU
660510ad9a77SSam Leffler }
660610ad9a77SSam Leffler
660710ad9a77SSam Leffler /*
660810ad9a77SSam Leffler * Transmit a beacon frame at SWBA. Dynamic updates
660910ad9a77SSam Leffler * to the frame contents are done as needed.
661010ad9a77SSam Leffler */
661110ad9a77SSam Leffler static void
661210ad9a77SSam Leffler ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
661310ad9a77SSam Leffler {
661410ad9a77SSam Leffler struct ath_hal *ah = sc->sc_ah;
661510ad9a77SSam Leffler struct ath_buf *bf;
661610ad9a77SSam Leffler int otherant;
661710ad9a77SSam Leffler
661810ad9a77SSam Leffler /*
661910ad9a77SSam Leffler * Check if the previous beacon has gone out. If
662010ad9a77SSam Leffler * not don't try to post another, skip this period
662110ad9a77SSam Leffler * and wait for the next. Missed beacons indicate
662210ad9a77SSam Leffler * a problem and should not occur. If we miss too
662310ad9a77SSam Leffler * many consecutive beacons reset the device.
662410ad9a77SSam Leffler */
662510ad9a77SSam Leffler if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
662610ad9a77SSam Leffler sc->sc_bmisscount++;
662710ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON,
662810ad9a77SSam Leffler "%s: missed %u consecutive beacons\n",
662910ad9a77SSam Leffler __func__, sc->sc_bmisscount);
6630a32ac9d3SSam Leffler if (sc->sc_bmisscount >= ath_bstuck_threshold)
663110ad9a77SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
663210ad9a77SSam Leffler return;
663310ad9a77SSam Leffler }
663410ad9a77SSam Leffler if (sc->sc_bmisscount != 0) {
663510ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON,
663610ad9a77SSam Leffler "%s: resume beacon xmit after %u misses\n",
663710ad9a77SSam Leffler __func__, sc->sc_bmisscount);
663810ad9a77SSam Leffler sc->sc_bmisscount = 0;
663910ad9a77SSam Leffler }
664010ad9a77SSam Leffler
664110ad9a77SSam Leffler /*
664210ad9a77SSam Leffler * Check recent per-antenna transmit statistics and flip
664310ad9a77SSam Leffler * the default antenna if noticeably more frames went out
664410ad9a77SSam Leffler * on the non-default antenna.
664510ad9a77SSam Leffler * XXX assumes 2 antennae
664610ad9a77SSam Leffler */
664710ad9a77SSam Leffler if (!sc->sc_diversity) {
664810ad9a77SSam Leffler otherant = sc->sc_defant & 1 ? 2 : 1;
664910ad9a77SSam Leffler if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
665010ad9a77SSam Leffler ath_setdefantenna(sc, otherant);
665110ad9a77SSam Leffler sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
665210ad9a77SSam Leffler }
665310ad9a77SSam Leffler
665410ad9a77SSam Leffler bf = ath_beacon_generate(sc, vap);
665510ad9a77SSam Leffler if (bf != NULL) {
665610ad9a77SSam Leffler /*
665710ad9a77SSam Leffler * Stop any current dma and put the new frame on the queue.
665810ad9a77SSam Leffler * This should never fail since we check above that no frames
665910ad9a77SSam Leffler * are still pending on the queue.
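 * (If the pending check above does trip repeatedly, recovery is driven
 * by the beacon-stuck task queued once sc_bmisscount reaches
 * ath_bstuck_threshold, typically ending in a chip reset.)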
666010ad9a77SSam Leffler */ 666110ad9a77SSam Leffler if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 666210ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 666310ad9a77SSam Leffler "%s: beacon queue %u did not stop?\n", 666410ad9a77SSam Leffler __func__, sc->sc_bhalq); 666510ad9a77SSam Leffler /* NB: the HAL still stops DMA, so proceed */ 666610ad9a77SSam Leffler } 666710ad9a77SSam Leffler ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 666810ad9a77SSam Leffler ath_hal_txstart(ah, sc->sc_bhalq); 666910ad9a77SSam Leffler 667010ad9a77SSam Leffler sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ 667110ad9a77SSam Leffler 667210ad9a77SSam Leffler /* 667310ad9a77SSam Leffler * Record local TSF for our last send for use 667410ad9a77SSam Leffler * in arbitrating slot collisions. 667510ad9a77SSam Leffler */ 667610ad9a77SSam Leffler vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 667710ad9a77SSam Leffler } 667810ad9a77SSam Leffler } 6679584f7327SSam Leffler #endif /* IEEE80211_SUPPORT_TDMA */ 6680e8dabfbeSAdrian Chadd 668148237774SAdrian Chadd static void 668248237774SAdrian Chadd ath_dfs_tasklet(void *p, int npending) 668348237774SAdrian Chadd { 668448237774SAdrian Chadd struct ath_softc *sc = (struct ath_softc *) p; 668548237774SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 668648237774SAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 668748237774SAdrian Chadd 668848237774SAdrian Chadd /* 668948237774SAdrian Chadd * If previous processing has found a radar event, 669048237774SAdrian Chadd * signal this to the net80211 layer to begin DFS 669148237774SAdrian Chadd * processing. 669248237774SAdrian Chadd */ 669348237774SAdrian Chadd if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 669448237774SAdrian Chadd /* DFS event found, initiate channel change */ 6695*06fc4a10SAdrian Chadd /* 6696*06fc4a10SAdrian Chadd * XXX doesn't currently tell us whether the event 6697*06fc4a10SAdrian Chadd * XXX was found in the primary or extension 6698*06fc4a10SAdrian Chadd * XXX channel! 6699*06fc4a10SAdrian Chadd */ 6700*06fc4a10SAdrian Chadd IEEE80211_LOCK(ic); 670148237774SAdrian Chadd ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 6702*06fc4a10SAdrian Chadd IEEE80211_UNLOCK(ic); 670348237774SAdrian Chadd } 670448237774SAdrian Chadd } 670548237774SAdrian Chadd 6706dba9c859SAdrian Chadd MODULE_VERSION(if_ath, 1); 6707dba9c859SAdrian Chadd MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6708