/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

#if 0
#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
	} else
		ath_recv_setup_legacy(sc);

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}

	error = ath_rxdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate RX descriptors: %d\n",
		    error);
		goto bad;
	}

	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
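	 * (e.g. via the per-device dev.ath.N.softled knob exposed by
	 * ath_sysctlattach() further below).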
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
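	 *
	 * For example, a device.hints entry such as
	 * hint.ath.0.rx_chainmask=3 would restrict ath0 to RX
	 * chains 0 and 1.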
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
			    				/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */
			;

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    |  IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
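		 * For example, a 2-stream chip ends up advertising
		 * MCS 0-15 while a single-stream chip is limited to
		 * MCS 0-7.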
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
		ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
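 *
 * Concretely, the loop below prefers a slot whose neighbouring
 * slots (modulo ATH_BCBUF) are also unused and only falls back
 * to an arbitrary free slot when no such "double slot" exists.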
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
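			 * The hal is run in hostap mode (see the ic_opmode
			 * assignment below) and beacon miss is detected in
			 * software rather than by the hardware.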
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
*/ 108459aa14a9SRui Paulo if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1085b032f27cSSam Leffler assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1086b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1087b032f27cSSam Leffler } 1088b032f27cSSam Leffler 1089b032f27cSSam Leffler vap = &avp->av_vap; 1090b032f27cSSam Leffler /* XXX can't hold mutex across if_alloc */ 1091b032f27cSSam Leffler ATH_UNLOCK(sc); 1092b032f27cSSam Leffler error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1093b032f27cSSam Leffler bssid, mac); 1094b032f27cSSam Leffler ATH_LOCK(sc); 1095b032f27cSSam Leffler if (error != 0) { 1096b032f27cSSam Leffler device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1097b032f27cSSam Leffler __func__, error); 1098b032f27cSSam Leffler goto bad2; 1099b032f27cSSam Leffler } 1100b032f27cSSam Leffler 1101b032f27cSSam Leffler /* h/w crypto support */ 1102b032f27cSSam Leffler vap->iv_key_alloc = ath_key_alloc; 1103b032f27cSSam Leffler vap->iv_key_delete = ath_key_delete; 1104b032f27cSSam Leffler vap->iv_key_set = ath_key_set; 1105b032f27cSSam Leffler vap->iv_key_update_begin = ath_key_update_begin; 1106b032f27cSSam Leffler vap->iv_key_update_end = ath_key_update_end; 1107b032f27cSSam Leffler 1108b032f27cSSam Leffler /* override various methods */ 1109b032f27cSSam Leffler avp->av_recv_mgmt = vap->iv_recv_mgmt; 1110b032f27cSSam Leffler vap->iv_recv_mgmt = ath_recv_mgmt; 1111b032f27cSSam Leffler vap->iv_reset = ath_reset_vap; 1112b032f27cSSam Leffler vap->iv_update_beacon = ath_beacon_update; 1113b032f27cSSam Leffler avp->av_newstate = vap->iv_newstate; 1114b032f27cSSam Leffler vap->iv_newstate = ath_newstate; 1115b032f27cSSam Leffler avp->av_bmiss = vap->iv_bmiss; 1116b032f27cSSam Leffler vap->iv_bmiss = ath_bmiss_vap; 1117b032f27cSSam Leffler 11189be25f4aSAdrian Chadd /* Set default parameters */ 11199be25f4aSAdrian Chadd 11209be25f4aSAdrian Chadd /* 11219be25f4aSAdrian Chadd * Anything earlier than some AR9300 series MACs don't 11229be25f4aSAdrian Chadd * support a smaller MPDU density. 11239be25f4aSAdrian Chadd */ 11249be25f4aSAdrian Chadd vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 11259be25f4aSAdrian Chadd /* 11269be25f4aSAdrian Chadd * All NICs can handle the maximum size, however 11279be25f4aSAdrian Chadd * AR5416 based MACs can only TX aggregates w/ RTS 11289be25f4aSAdrian Chadd * protection when the total aggregate size is <= 8k. 11299be25f4aSAdrian Chadd * However, for now that's enforced by the TX path. 11309be25f4aSAdrian Chadd */ 11319be25f4aSAdrian Chadd vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 11329be25f4aSAdrian Chadd 1133b032f27cSSam Leffler avp->av_bslot = -1; 1134b032f27cSSam Leffler if (needbeacon) { 1135b032f27cSSam Leffler /* 1136b032f27cSSam Leffler * Allocate beacon state and setup the q for buffered 1137b032f27cSSam Leffler * multicast frames. We know a beacon buffer is 1138b032f27cSSam Leffler * available because we checked above. 1139b032f27cSSam Leffler */ 11406b349e5aSAdrian Chadd avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 11416b349e5aSAdrian Chadd TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1142b032f27cSSam Leffler if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1143b032f27cSSam Leffler /* 1144b032f27cSSam Leffler * Assign the vap to a beacon xmit slot. As above 1145b032f27cSSam Leffler * this cannot fail to find a free one. 
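		 * (assign_bslot() prefers a slot whose neighbours are both
		 * empty so staggered beacons get the largest spacing; e.g.
		 * with four slots 0..3 and only slot 0 occupied it returns
		 * the lone "double" slot 2 rather than slot 1.)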
1146b032f27cSSam Leffler */ 1147b032f27cSSam Leffler avp->av_bslot = assign_bslot(sc); 1148b032f27cSSam Leffler KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1149b032f27cSSam Leffler ("beacon slot %u not empty", avp->av_bslot)); 1150b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = vap; 1151b032f27cSSam Leffler sc->sc_nbcnvaps++; 1152b032f27cSSam Leffler } 1153b032f27cSSam Leffler if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1154b032f27cSSam Leffler /* 1155b032f27cSSam Leffler * Multple vaps are to transmit beacons and we 1156b032f27cSSam Leffler * have h/w support for TSF adjusting; enable 1157b032f27cSSam Leffler * use of staggered beacons. 1158b032f27cSSam Leffler */ 1159b032f27cSSam Leffler sc->sc_stagbeacons = 1; 1160b032f27cSSam Leffler } 1161b032f27cSSam Leffler ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1162b032f27cSSam Leffler } 1163b032f27cSSam Leffler 1164b032f27cSSam Leffler ic->ic_opmode = ic_opmode; 1165b032f27cSSam Leffler if (opmode != IEEE80211_M_WDS) { 1166b032f27cSSam Leffler sc->sc_nvaps++; 1167b032f27cSSam Leffler if (opmode == IEEE80211_M_STA) 1168b032f27cSSam Leffler sc->sc_nstavaps++; 1169fe0dd789SSam Leffler if (opmode == IEEE80211_M_MBSS) 1170fe0dd789SSam Leffler sc->sc_nmeshvaps++; 1171b032f27cSSam Leffler } 1172b032f27cSSam Leffler switch (ic_opmode) { 1173b032f27cSSam Leffler case IEEE80211_M_IBSS: 1174b032f27cSSam Leffler sc->sc_opmode = HAL_M_IBSS; 1175b032f27cSSam Leffler break; 1176b032f27cSSam Leffler case IEEE80211_M_STA: 1177b032f27cSSam Leffler sc->sc_opmode = HAL_M_STA; 1178b032f27cSSam Leffler break; 1179b032f27cSSam Leffler case IEEE80211_M_AHDEMO: 1180584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 118110ad9a77SSam Leffler if (vap->iv_caps & IEEE80211_C_TDMA) { 118210ad9a77SSam Leffler sc->sc_tdma = 1; 118310ad9a77SSam Leffler /* NB: disable tsf adjust */ 118410ad9a77SSam Leffler sc->sc_stagbeacons = 0; 118510ad9a77SSam Leffler } 118610ad9a77SSam Leffler /* 118710ad9a77SSam Leffler * NB: adhoc demo mode is a pseudo mode; to the hal it's 118810ad9a77SSam Leffler * just ap mode. 118910ad9a77SSam Leffler */ 119010ad9a77SSam Leffler /* fall thru... */ 119110ad9a77SSam Leffler #endif 1192b032f27cSSam Leffler case IEEE80211_M_HOSTAP: 119359aa14a9SRui Paulo case IEEE80211_M_MBSS: 1194b032f27cSSam Leffler sc->sc_opmode = HAL_M_HOSTAP; 1195b032f27cSSam Leffler break; 1196b032f27cSSam Leffler case IEEE80211_M_MONITOR: 1197b032f27cSSam Leffler sc->sc_opmode = HAL_M_MONITOR; 1198b032f27cSSam Leffler break; 1199b032f27cSSam Leffler default: 1200b032f27cSSam Leffler /* XXX should not happen */ 1201b032f27cSSam Leffler break; 1202b032f27cSSam Leffler } 1203b032f27cSSam Leffler if (sc->sc_hastsfadd) { 1204b032f27cSSam Leffler /* 1205b032f27cSSam Leffler * Configure whether or not TSF adjust should be done. 1206b032f27cSSam Leffler */ 1207b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1208b032f27cSSam Leffler } 120910ad9a77SSam Leffler if (flags & IEEE80211_CLONE_NOBEACONS) { 121010ad9a77SSam Leffler /* 121110ad9a77SSam Leffler * Enable s/w beacon miss handling. 
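	 * (With IEEE80211_CLONE_NOBEACONS net80211 marks the vap
	 * IEEE80211_FEXT_SWBMISS and runs its own beacon miss timer,
	 * so ath_bmiss_vap() below skips its phantom-bmiss check for
	 * such vaps; sc_swbmiss is cleared again in ath_vap_delete()
	 * when the last sta vap goes away.)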
121210ad9a77SSam Leffler */
121310ad9a77SSam Leffler sc->sc_swbmiss = 1;
121410ad9a77SSam Leffler }
1215b032f27cSSam Leffler ATH_UNLOCK(sc);
1216b032f27cSSam Leffler
1217b032f27cSSam Leffler /* complete setup */
1218b032f27cSSam Leffler ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1219b032f27cSSam Leffler return vap;
1220b032f27cSSam Leffler bad2:
1221b032f27cSSam Leffler reclaim_address(sc, mac);
1222b032f27cSSam Leffler ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1223b032f27cSSam Leffler bad:
1224b032f27cSSam Leffler free(avp, M_80211_VAP);
1225b032f27cSSam Leffler ATH_UNLOCK(sc);
1226b032f27cSSam Leffler return NULL;
1227b032f27cSSam Leffler }
1228b032f27cSSam Leffler
1229b032f27cSSam Leffler static void
1230b032f27cSSam Leffler ath_vap_delete(struct ieee80211vap *vap)
1231b032f27cSSam Leffler {
1232b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic;
1233b032f27cSSam Leffler struct ifnet *ifp = ic->ic_ifp;
1234b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc;
1235b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah;
1236b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap);
1237b032f27cSSam Leffler
1238f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1239b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1240b032f27cSSam Leffler /*
1241b032f27cSSam Leffler * Quiesce the hardware while we remove the vap. In
1242b032f27cSSam Leffler * particular we need to reclaim all references to
1243b032f27cSSam Leffler * the vap state by any frames pending on the tx queues.
1244b032f27cSSam Leffler */
1245b032f27cSSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */
1246517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1247517526efSAdrian Chadd /* XXX Do all frames from all vaps/nodes need draining here? */
12489a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* stop recv side */
1249b032f27cSSam Leffler }
1250b032f27cSSam Leffler
1251b032f27cSSam Leffler ieee80211_vap_detach(vap);
125216d4de92SAdrian Chadd
125316d4de92SAdrian Chadd /*
125416d4de92SAdrian Chadd * XXX Danger Will Robinson! Danger!
125516d4de92SAdrian Chadd *
125616d4de92SAdrian Chadd * Because ieee80211_vap_detach() can queue a frame (the station
125716d4de92SAdrian Chadd * disassociate message?) after we've drained the TXQ and
125816d4de92SAdrian Chadd * flushed the software TXQ, we will end up with a frame queued
125916d4de92SAdrian Chadd * to a node whose vap is about to be freed.
126016d4de92SAdrian Chadd *
126116d4de92SAdrian Chadd * To work around this, flush the hardware/software again.
126216d4de92SAdrian Chadd * This may be racy - the ath task may be running and the packet
126316d4de92SAdrian Chadd * may be being scheduled between sw->hw txq. Tsk.
126416d4de92SAdrian Chadd *
126516d4de92SAdrian Chadd * TODO: figure out why a new node gets allocated somewhere around
126616d4de92SAdrian Chadd * here (after the ath_tx_swq() call; and after an ath_stop_locked()
126716d4de92SAdrian Chadd * call!)
126816d4de92SAdrian Chadd */
126916d4de92SAdrian Chadd
127016d4de92SAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT);
127116d4de92SAdrian Chadd
1272b032f27cSSam Leffler ATH_LOCK(sc);
1273b032f27cSSam Leffler /*
1274b032f27cSSam Leffler * Reclaim beacon state. Note this must be done before
1275b032f27cSSam Leffler * the vap instance is reclaimed as we may have a reference
1276b032f27cSSam Leffler * to it in the buffer for the beacon frame.
1277b032f27cSSam Leffler */ 1278b032f27cSSam Leffler if (avp->av_bcbuf != NULL) { 1279b032f27cSSam Leffler if (avp->av_bslot != -1) { 1280b032f27cSSam Leffler sc->sc_bslot[avp->av_bslot] = NULL; 1281b032f27cSSam Leffler sc->sc_nbcnvaps--; 1282b032f27cSSam Leffler } 1283b032f27cSSam Leffler ath_beacon_return(sc, avp->av_bcbuf); 1284b032f27cSSam Leffler avp->av_bcbuf = NULL; 1285b032f27cSSam Leffler if (sc->sc_nbcnvaps == 0) { 1286b032f27cSSam Leffler sc->sc_stagbeacons = 0; 1287b032f27cSSam Leffler if (sc->sc_hastsfadd) 1288b032f27cSSam Leffler ath_hal_settsfadjust(sc->sc_ah, 0); 1289b032f27cSSam Leffler } 1290b032f27cSSam Leffler /* 1291b032f27cSSam Leffler * Reclaim any pending mcast frames for the vap. 1292b032f27cSSam Leffler */ 1293b032f27cSSam Leffler ath_tx_draintxq(sc, &avp->av_mcastq); 1294b032f27cSSam Leffler ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1295b032f27cSSam Leffler } 1296b032f27cSSam Leffler /* 1297b032f27cSSam Leffler * Update bookkeeping. 1298b032f27cSSam Leffler */ 1299b032f27cSSam Leffler if (vap->iv_opmode == IEEE80211_M_STA) { 1300b032f27cSSam Leffler sc->sc_nstavaps--; 1301b032f27cSSam Leffler if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1302b032f27cSSam Leffler sc->sc_swbmiss = 0; 130359aa14a9SRui Paulo } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 130459aa14a9SRui Paulo vap->iv_opmode == IEEE80211_M_MBSS) { 1305b032f27cSSam Leffler reclaim_address(sc, vap->iv_myaddr); 1306b032f27cSSam Leffler ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1307fe0dd789SSam Leffler if (vap->iv_opmode == IEEE80211_M_MBSS) 1308fe0dd789SSam Leffler sc->sc_nmeshvaps--; 1309b032f27cSSam Leffler } 1310b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_WDS) 1311b032f27cSSam Leffler sc->sc_nvaps--; 1312584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 131310ad9a77SSam Leffler /* TDMA operation ceases when the last vap is destroyed */ 131410ad9a77SSam Leffler if (sc->sc_tdma && sc->sc_nvaps == 0) { 131510ad9a77SSam Leffler sc->sc_tdma = 0; 131610ad9a77SSam Leffler sc->sc_swbmiss = 0; 131710ad9a77SSam Leffler } 131810ad9a77SSam Leffler #endif 1319b032f27cSSam Leffler free(avp, M_80211_VAP); 1320b032f27cSSam Leffler 1321b032f27cSSam Leffler if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1322b032f27cSSam Leffler /* 1323b032f27cSSam Leffler * Restart rx+tx machines if still running (RUNNING will 1324b032f27cSSam Leffler * be reset if we just destroyed the last vap). 
1325b032f27cSSam Leffler */ 1326b032f27cSSam Leffler if (ath_startrecv(sc) != 0) 1327b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 1328b032f27cSSam Leffler __func__); 1329c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 1330c89b957aSSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 1331c89b957aSSam Leffler if (sc->sc_tdma) 1332c89b957aSSam Leffler ath_tdma_config(sc, NULL); 1333c89b957aSSam Leffler else 1334c89b957aSSam Leffler #endif 1335b032f27cSSam Leffler ath_beacon_config(sc, NULL); 1336c89b957aSSam Leffler } 1337b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1338b032f27cSSam Leffler } 133916d4de92SAdrian Chadd ATH_UNLOCK(sc); 1340b032f27cSSam Leffler } 1341b032f27cSSam Leffler 13425591b213SSam Leffler void 13435591b213SSam Leffler ath_suspend(struct ath_softc *sc) 13445591b213SSam Leffler { 1345fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1346d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 13475591b213SSam Leffler 1348c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1349c42a7b7eSSam Leffler __func__, ifp->if_flags); 13505591b213SSam Leffler 1351d3ac945bSSam Leffler sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1352d1328898SAdrian Chadd 1353d3ac945bSSam Leffler ieee80211_suspend_all(ic); 1354d3ac945bSSam Leffler /* 1355d3ac945bSSam Leffler * NB: don't worry about putting the chip in low power 1356d3ac945bSSam Leffler * mode; pci will power off our socket on suspend and 1357f29b8b7fSWarner Losh * CardBus detaches the device. 1358d3ac945bSSam Leffler */ 1359d73df6d5SAdrian Chadd 1360ae2a0aa4SAdrian Chadd /* 1361ae2a0aa4SAdrian Chadd * XXX ensure none of the taskqueues are running 1362ae2a0aa4SAdrian Chadd * XXX ensure sc_invalid is 1 1363ae2a0aa4SAdrian Chadd * XXX ensure the calibration callout is disabled 1364ae2a0aa4SAdrian Chadd */ 1365ae2a0aa4SAdrian Chadd 1366ae2a0aa4SAdrian Chadd /* Disable the PCIe PHY, complete with workarounds */ 1367ae2a0aa4SAdrian Chadd ath_hal_enablepcie(sc->sc_ah, 1, 1); 1368d3ac945bSSam Leffler } 1369d3ac945bSSam Leffler 1370d3ac945bSSam Leffler /* 1371d3ac945bSSam Leffler * Reset the key cache since some parts do not reset the 1372d3ac945bSSam Leffler * contents on resume. First we clear all entries, then 1373d3ac945bSSam Leffler * re-load keys that the 802.11 layer assumes are setup 1374d3ac945bSSam Leffler * in h/w. 
1375d3ac945bSSam Leffler */ 1376d3ac945bSSam Leffler static void 1377d3ac945bSSam Leffler ath_reset_keycache(struct ath_softc *sc) 1378d3ac945bSSam Leffler { 1379d3ac945bSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1380d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1381d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1382d3ac945bSSam Leffler int i; 1383d3ac945bSSam Leffler 1384d3ac945bSSam Leffler for (i = 0; i < sc->sc_keymax; i++) 1385d3ac945bSSam Leffler ath_hal_keyreset(ah, i); 1386d3ac945bSSam Leffler ieee80211_crypto_reload_keys(ic); 13875591b213SSam Leffler } 13885591b213SSam Leffler 13895591b213SSam Leffler void 13905591b213SSam Leffler ath_resume(struct ath_softc *sc) 13915591b213SSam Leffler { 1392fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1393d3ac945bSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1394d3ac945bSSam Leffler struct ath_hal *ah = sc->sc_ah; 1395d3ac945bSSam Leffler HAL_STATUS status; 13965591b213SSam Leffler 1397c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1398c42a7b7eSSam Leffler __func__, ifp->if_flags); 13995591b213SSam Leffler 1400d73df6d5SAdrian Chadd /* Re-enable PCIe, re-enable the PCIe bus */ 1401ae2a0aa4SAdrian Chadd ath_hal_enablepcie(ah, 0, 0); 1402d73df6d5SAdrian Chadd 1403d3ac945bSSam Leffler /* 1404d3ac945bSSam Leffler * Must reset the chip before we reload the 1405d3ac945bSSam Leffler * keycache as we were powered down on suspend. 1406d3ac945bSSam Leffler */ 1407054d7b69SSam Leffler ath_hal_reset(ah, sc->sc_opmode, 1408054d7b69SSam Leffler sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, 1409054d7b69SSam Leffler AH_FALSE, &status); 1410d3ac945bSSam Leffler ath_reset_keycache(sc); 14117e5eb44dSAdrian Chadd 14127e5eb44dSAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 14137e5eb44dSAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 14147e5eb44dSAdrian Chadd 1415a497cd88SAdrian Chadd /* Restore the LED configuration */ 1416a497cd88SAdrian Chadd ath_led_config(sc); 1417a497cd88SAdrian Chadd ath_hal_setledstate(ah, HAL_LED_INIT); 1418a497cd88SAdrian Chadd 1419d1328898SAdrian Chadd if (sc->sc_resume_up) 1420021a0db5SAdrian Chadd ieee80211_resume_all(ic); 14212fd9aabbSAdrian Chadd 14222fd9aabbSAdrian Chadd /* XXX beacons ? */ 14236b59f5e3SSam Leffler } 14245591b213SSam Leffler 14255591b213SSam Leffler void 14265591b213SSam Leffler ath_shutdown(struct ath_softc *sc) 14275591b213SSam Leffler { 1428fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 14295591b213SSam Leffler 1430c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1431c42a7b7eSSam Leffler __func__, ifp->if_flags); 14325591b213SSam Leffler 14335591b213SSam Leffler ath_stop(ifp); 1434d3ac945bSSam Leffler /* NB: no point powering down chip as we're about to reboot */ 14355591b213SSam Leffler } 14365591b213SSam Leffler 1437c42a7b7eSSam Leffler /* 1438c42a7b7eSSam Leffler * Interrupt handler. Most of the actual processing is deferred. 1439c42a7b7eSSam Leffler */ 14405591b213SSam Leffler void 14415591b213SSam Leffler ath_intr(void *arg) 14425591b213SSam Leffler { 14435591b213SSam Leffler struct ath_softc *sc = arg; 1444fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 14455591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 14466f5fe81eSAdrian Chadd HAL_INT status = 0; 14478f939e79SAdrian Chadd uint32_t txqs; 14485591b213SSam Leffler 1449ef27340cSAdrian Chadd /* 1450ef27340cSAdrian Chadd * If we're inside a reset path, just print a warning and 1451ef27340cSAdrian Chadd * clear the ISR. 
The reset routine will finish it for us. 1452ef27340cSAdrian Chadd */ 1453ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1454ef27340cSAdrian Chadd if (sc->sc_inreset_cnt) { 1455ef27340cSAdrian Chadd HAL_INT status; 1456ef27340cSAdrian Chadd ath_hal_getisr(ah, &status); /* clear ISR */ 1457ef27340cSAdrian Chadd ath_hal_intrset(ah, 0); /* disable further intr's */ 1458ef27340cSAdrian Chadd DPRINTF(sc, ATH_DEBUG_ANY, 1459ef27340cSAdrian Chadd "%s: in reset, ignoring: status=0x%x\n", 1460ef27340cSAdrian Chadd __func__, status); 1461ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1462ef27340cSAdrian Chadd return; 1463ef27340cSAdrian Chadd } 1464ef27340cSAdrian Chadd 14655591b213SSam Leffler if (sc->sc_invalid) { 14665591b213SSam Leffler /* 1467b58b3803SSam Leffler * The hardware is not ready/present, don't touch anything. 1468b58b3803SSam Leffler * Note this can happen early on if the IRQ is shared. 14695591b213SSam Leffler */ 1470c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1471ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14725591b213SSam Leffler return; 14735591b213SSam Leffler } 1474ef27340cSAdrian Chadd if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1475ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1476fdd758d4SSam Leffler return; 1477ef27340cSAdrian Chadd } 1478ef27340cSAdrian Chadd 147968e8e04eSSam Leffler if ((ifp->if_flags & IFF_UP) == 0 || 148068e8e04eSSam Leffler (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 148168e8e04eSSam Leffler HAL_INT status; 148268e8e04eSSam Leffler 1483c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1484c42a7b7eSSam Leffler __func__, ifp->if_flags); 14855591b213SSam Leffler ath_hal_getisr(ah, &status); /* clear ISR */ 14865591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable further intr's */ 1487ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 14885591b213SSam Leffler return; 14895591b213SSam Leffler } 1490ef27340cSAdrian Chadd 1491c42a7b7eSSam Leffler /* 1492c42a7b7eSSam Leffler * Figure out the reason(s) for the interrupt. Note 1493c42a7b7eSSam Leffler * that the hal returns a pseudo-ISR that may include 1494c42a7b7eSSam Leffler * bits we haven't explicitly enabled so we mask the 1495c42a7b7eSSam Leffler * value to insure we only process bits we requested. 
1496c42a7b7eSSam Leffler */
14975591b213SSam Leffler ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1498c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1499f52d3452SAdrian Chadd CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
150031fdf3d6SAdrian Chadd #ifdef ATH_KTR_INTR_DEBUG
1501f52d3452SAdrian Chadd CTR5(ATH_KTR_INTR,
1502f52d3452SAdrian Chadd "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1503f52d3452SAdrian Chadd ah->ah_intrstate[0],
1504f52d3452SAdrian Chadd ah->ah_intrstate[1],
1505f52d3452SAdrian Chadd ah->ah_intrstate[2],
1506f52d3452SAdrian Chadd ah->ah_intrstate[3],
1507f52d3452SAdrian Chadd ah->ah_intrstate[6]);
150831fdf3d6SAdrian Chadd #endif
15099467e3f3SAdrian Chadd
15109467e3f3SAdrian Chadd /* Squirrel away SYNC interrupt debugging */
15119467e3f3SAdrian Chadd if (ah->ah_syncstate != 0) {
15129467e3f3SAdrian Chadd int i;
15139467e3f3SAdrian Chadd for (i = 0; i < 32; i++)
15149467e3f3SAdrian Chadd if (ah->ah_syncstate & (1 << i))
15159467e3f3SAdrian Chadd sc->sc_intr_stats.sync_intr[i]++;
15169467e3f3SAdrian Chadd }
15179467e3f3SAdrian Chadd
1518ecddff40SSam Leffler status &= sc->sc_imask; /* discard unasked for bits */
15196f5fe81eSAdrian Chadd
15206f5fe81eSAdrian Chadd /* Short-circuit un-handled interrupts */
1521ef27340cSAdrian Chadd if (status == 0x0) {
1522ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
15236f5fe81eSAdrian Chadd return;
1524ef27340cSAdrian Chadd }
15256f5fe81eSAdrian Chadd
1526ef27340cSAdrian Chadd /*
1527ef27340cSAdrian Chadd * Take a note that we're inside the interrupt handler, so
1528ef27340cSAdrian Chadd * the reset routines know to wait.
1529ef27340cSAdrian Chadd */
1530ef27340cSAdrian Chadd sc->sc_intr_cnt++;
1531ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
1532ef27340cSAdrian Chadd
1533ef27340cSAdrian Chadd /*
1534ef27340cSAdrian Chadd * Handle the interrupt. We won't run concurrent with the reset
1535ef27340cSAdrian Chadd * or channel change routines as they'll wait for sc_intr_cnt
1536ef27340cSAdrian Chadd * to be 0 before continuing.
1537ef27340cSAdrian Chadd */
15385591b213SSam Leffler if (status & HAL_INT_FATAL) {
15395591b213SSam Leffler sc->sc_stats.ast_hardware++;
15405591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable intr's until reset */
1541f846cf42SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
15425591b213SSam Leffler } else {
1543c42a7b7eSSam Leffler if (status & HAL_INT_SWBA) {
1544c42a7b7eSSam Leffler /*
1545c42a7b7eSSam Leffler * Software beacon alert--time to send a beacon.
1546c42a7b7eSSam Leffler * Handle beacon transmission directly; deferring
1547c42a7b7eSSam Leffler * this is too slow to meet timing constraints
1548c42a7b7eSSam Leffler * under load.
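			 * (SWBA is raised a little ahead of the target
			 * beacon transmission time, so the frame has to be
			 * queued to the hardware before TBTT passes; hence
			 * ath_beacon_proc() is called directly from
			 * interrupt context below.)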
1549c42a7b7eSSam Leffler */
1550584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA
155110ad9a77SSam Leffler if (sc->sc_tdma) {
155210ad9a77SSam Leffler if (sc->sc_tdmaswba == 0) {
155310ad9a77SSam Leffler struct ieee80211com *ic = ifp->if_l2com;
155410ad9a77SSam Leffler struct ieee80211vap *vap =
155510ad9a77SSam Leffler TAILQ_FIRST(&ic->ic_vaps);
155610ad9a77SSam Leffler ath_tdma_beacon_send(sc, vap);
155710ad9a77SSam Leffler sc->sc_tdmaswba =
155810ad9a77SSam Leffler vap->iv_tdma->tdma_bintval;
155910ad9a77SSam Leffler } else
156010ad9a77SSam Leffler sc->sc_tdmaswba--;
156110ad9a77SSam Leffler } else
156210ad9a77SSam Leffler #endif
1563339ccfb3SSam Leffler {
1564c42a7b7eSSam Leffler ath_beacon_proc(sc, 0);
1565339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG
1566339ccfb3SSam Leffler /*
1567339ccfb3SSam Leffler * Schedule the rx taskq in case there's no
1568339ccfb3SSam Leffler * traffic so any frames held on the staging
1569339ccfb3SSam Leffler * queue are aged and potentially flushed.
1570339ccfb3SSam Leffler */
1571339ccfb3SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1572339ccfb3SSam Leffler #endif
1573339ccfb3SSam Leffler }
1574c42a7b7eSSam Leffler }
15755591b213SSam Leffler if (status & HAL_INT_RXEOL) {
15768f939e79SAdrian Chadd int imask;
1577f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
1578ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
15795591b213SSam Leffler /*
15805591b213SSam Leffler * NB: the hardware should re-read the link when
15815591b213SSam Leffler * RXE bit is written, but it doesn't work at
15825591b213SSam Leffler * least on older hardware revs.
15835591b213SSam Leffler */
15845591b213SSam Leffler sc->sc_stats.ast_rxeol++;
158573f895fcSAdrian Chadd /*
158673f895fcSAdrian Chadd * Disable RXEOL/RXORN - prevent an interrupt
158773f895fcSAdrian Chadd * storm until the PCU logic can be reset.
15881fdadc0fSAdrian Chadd * In case the interface is reset some other
15891fdadc0fSAdrian Chadd * way before "sc_kickpcu" is called, don't
15901fdadc0fSAdrian Chadd * modify sc_imask - that way if it is reset
15911fdadc0fSAdrian Chadd * by a call to ath_reset() somehow, the
15921fdadc0fSAdrian Chadd * interrupt mask will be correctly reprogrammed.
159373f895fcSAdrian Chadd */
15948f939e79SAdrian Chadd imask = sc->sc_imask;
15951fdadc0fSAdrian Chadd imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
15961fdadc0fSAdrian Chadd ath_hal_intrset(ah, imask);
15971fdadc0fSAdrian Chadd /*
15988f939e79SAdrian Chadd * Only blank sc_rxlink if we've not yet kicked
15998f939e79SAdrian Chadd * the PCU.
16008f939e79SAdrian Chadd *
16018f939e79SAdrian Chadd * This isn't entirely correct - the correct solution
16028f939e79SAdrian Chadd * would be to have a PCU lock and engage that for
16038f939e79SAdrian Chadd * the duration of the PCU fiddling; which would include
16048f939e79SAdrian Chadd * running the RX process. Otherwise we could end up
16058f939e79SAdrian Chadd * messing up the RX descriptor chain and making the
16068f939e79SAdrian Chadd * RX desc list much shorter.
16078f939e79SAdrian Chadd */
16088f939e79SAdrian Chadd if (! sc->sc_kickpcu)
16098f939e79SAdrian Chadd sc->sc_rxlink = NULL;
16108f939e79SAdrian Chadd sc->sc_kickpcu = 1;
16118f939e79SAdrian Chadd /*
16121fdadc0fSAdrian Chadd * Enqueue an RX proc, to handle whatever
16131fdadc0fSAdrian Chadd * is in the RX queue.
16141fdadc0fSAdrian Chadd * This will then kick the PCU.
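			 * (The RX task is expected to notice sc_kickpcu,
			 * restart RX DMA and re-enable the RXEOL/RXORN bits
			 * from sc_imask once the PCU is going again; the
			 * MIB handling below likewise avoids re-arming
			 * interrupts while sc_kickpcu is still set.)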
16151fdadc0fSAdrian Chadd */ 16161fdadc0fSAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1617ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16185591b213SSam Leffler } 16195591b213SSam Leffler if (status & HAL_INT_TXURN) { 16205591b213SSam Leffler sc->sc_stats.ast_txurn++; 16215591b213SSam Leffler /* bump tx trigger level */ 16225591b213SSam Leffler ath_hal_updatetxtriglevel(ah, AH_TRUE); 16235591b213SSam Leffler } 1624bcbb08ceSAdrian Chadd /* 1625bcbb08ceSAdrian Chadd * Handle both the legacy and RX EDMA interrupt bits. 1626bcbb08ceSAdrian Chadd * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 1627bcbb08ceSAdrian Chadd */ 1628bcbb08ceSAdrian Chadd if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 16298f939e79SAdrian Chadd sc->sc_stats.ast_rx_intr++; 16300bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 16318f939e79SAdrian Chadd } 16328f939e79SAdrian Chadd if (status & HAL_INT_TX) { 16338f939e79SAdrian Chadd sc->sc_stats.ast_tx_intr++; 16348f939e79SAdrian Chadd /* 16358f939e79SAdrian Chadd * Grab all the currently set bits in the HAL txq bitmap 16368f939e79SAdrian Chadd * and blank them. This is the only place we should be 16378f939e79SAdrian Chadd * doing this. 16388f939e79SAdrian Chadd */ 1639ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 16408f939e79SAdrian Chadd txqs = 0xffffffff; 16418f939e79SAdrian Chadd ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 16428f939e79SAdrian Chadd sc->sc_txq_active |= txqs; 16430bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 1644ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16458f939e79SAdrian Chadd } 16465591b213SSam Leffler if (status & HAL_INT_BMISS) { 16475591b213SSam Leffler sc->sc_stats.ast_bmiss++; 16480bbf5441SSam Leffler taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 16495591b213SSam Leffler } 16506ad02dbaSAdrian Chadd if (status & HAL_INT_GTT) 16516ad02dbaSAdrian Chadd sc->sc_stats.ast_tx_timeout++; 16525594f5c0SAdrian Chadd if (status & HAL_INT_CST) 16535594f5c0SAdrian Chadd sc->sc_stats.ast_tx_cst++; 1654c42a7b7eSSam Leffler if (status & HAL_INT_MIB) { 1655c42a7b7eSSam Leffler sc->sc_stats.ast_mib++; 1656ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1657c42a7b7eSSam Leffler /* 1658c42a7b7eSSam Leffler * Disable interrupts until we service the MIB 1659c42a7b7eSSam Leffler * interrupt; otherwise it will continue to fire. 1660c42a7b7eSSam Leffler */ 1661c42a7b7eSSam Leffler ath_hal_intrset(ah, 0); 1662c42a7b7eSSam Leffler /* 1663c42a7b7eSSam Leffler * Let the hal handle the event. We assume it will 1664c42a7b7eSSam Leffler * clear whatever condition caused the interrupt. 1665c42a7b7eSSam Leffler */ 1666ffa2cab6SSam Leffler ath_hal_mibevent(ah, &sc->sc_halstats); 16678f939e79SAdrian Chadd /* 16688f939e79SAdrian Chadd * Don't reset the interrupt if we've just 16698f939e79SAdrian Chadd * kicked the PCU, or we may get a nested 16708f939e79SAdrian Chadd * RXEOL before the rxproc has had a chance 16718f939e79SAdrian Chadd * to run. 
16728f939e79SAdrian Chadd */ 16738f939e79SAdrian Chadd if (sc->sc_kickpcu == 0) 1674c42a7b7eSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 1675ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 1676c42a7b7eSSam Leffler } 16779c4fc1e8SSam Leffler if (status & HAL_INT_RXORN) { 16789c4fc1e8SSam Leffler /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 1679f52d3452SAdrian Chadd CTR0(ATH_KTR_ERR, "ath_intr: RXORN"); 16809c4fc1e8SSam Leffler sc->sc_stats.ast_rxorn++; 16819c4fc1e8SSam Leffler } 16825591b213SSam Leffler } 1683ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 1684ef27340cSAdrian Chadd sc->sc_intr_cnt--; 1685ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 16865591b213SSam Leffler } 16875591b213SSam Leffler 16885591b213SSam Leffler static void 16895591b213SSam Leffler ath_fatal_proc(void *arg, int pending) 16905591b213SSam Leffler { 16915591b213SSam Leffler struct ath_softc *sc = arg; 1692fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 169316c8acaaSSam Leffler u_int32_t *state; 169416c8acaaSSam Leffler u_int32_t len; 169568e8e04eSSam Leffler void *sp; 16965591b213SSam Leffler 1697c42a7b7eSSam Leffler if_printf(ifp, "hardware error; resetting\n"); 169816c8acaaSSam Leffler /* 169916c8acaaSSam Leffler * Fatal errors are unrecoverable. Typically these 170016c8acaaSSam Leffler * are caused by DMA errors. Collect h/w state from 170116c8acaaSSam Leffler * the hal so we can diagnose what's going on. 170216c8acaaSSam Leffler */ 170368e8e04eSSam Leffler if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 170416c8acaaSSam Leffler KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 170568e8e04eSSam Leffler state = sp; 170616c8acaaSSam Leffler if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 170716c8acaaSSam Leffler state[0], state[1] , state[2], state[3], 170816c8acaaSSam Leffler state[4], state[5]); 170916c8acaaSSam Leffler } 1710517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 17115591b213SSam Leffler } 17125591b213SSam Leffler 17135591b213SSam Leffler static void 1714b032f27cSSam Leffler ath_bmiss_vap(struct ieee80211vap *vap) 17155591b213SSam Leffler { 171659fbb257SSam Leffler /* 171759fbb257SSam Leffler * Workaround phantom bmiss interrupts by sanity-checking 171859fbb257SSam Leffler * the time of our last rx'd frame. If it is within the 171959fbb257SSam Leffler * beacon miss interval then ignore the interrupt. If it's 172059fbb257SSam Leffler * truly a bmiss we'll get another interrupt soon and that'll 172159fbb257SSam Leffler * be dispatched up for processing. Note this applies only 172259fbb257SSam Leffler * for h/w beacon miss events. 
172359fbb257SSam Leffler */ 172459fbb257SSam Leffler if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1725a7ace843SSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 1726a7ace843SSam Leffler struct ath_softc *sc = ifp->if_softc; 1727d7736e13SSam Leffler u_int64_t lastrx = sc->sc_lastrx; 1728d7736e13SSam Leffler u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 172980767531SAdrian Chadd /* XXX should take a locked ref to iv_bss */ 1730d7736e13SSam Leffler u_int bmisstimeout = 1731b032f27cSSam Leffler vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1732d7736e13SSam Leffler 1733d7736e13SSam Leffler DPRINTF(sc, ATH_DEBUG_BEACON, 1734d7736e13SSam Leffler "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1735d7736e13SSam Leffler __func__, (unsigned long long) tsf, 1736d7736e13SSam Leffler (unsigned long long)(tsf - lastrx), 1737d7736e13SSam Leffler (unsigned long long) lastrx, bmisstimeout); 173859fbb257SSam Leffler 173959fbb257SSam Leffler if (tsf - lastrx <= bmisstimeout) { 1740d7736e13SSam Leffler sc->sc_stats.ast_bmiss_phantom++; 174159fbb257SSam Leffler return; 174259fbb257SSam Leffler } 174359fbb257SSam Leffler } 174459fbb257SSam Leffler ATH_VAP(vap)->av_bmiss(vap); 1745e585d188SSam Leffler } 1746b032f27cSSam Leffler 1747459bc4f0SSam Leffler static int 1748459bc4f0SSam Leffler ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1749459bc4f0SSam Leffler { 1750459bc4f0SSam Leffler uint32_t rsize; 1751459bc4f0SSam Leffler void *sp; 1752459bc4f0SSam Leffler 175325c96056SAdrian Chadd if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1754459bc4f0SSam Leffler return 0; 1755459bc4f0SSam Leffler KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1756459bc4f0SSam Leffler *hangs = *(uint32_t *)sp; 1757459bc4f0SSam Leffler return 1; 1758459bc4f0SSam Leffler } 1759459bc4f0SSam Leffler 1760b032f27cSSam Leffler static void 1761b032f27cSSam Leffler ath_bmiss_proc(void *arg, int pending) 1762b032f27cSSam Leffler { 1763b032f27cSSam Leffler struct ath_softc *sc = arg; 1764b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1765459bc4f0SSam Leffler uint32_t hangs; 1766b032f27cSSam Leffler 1767b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1768459bc4f0SSam Leffler 1769459bc4f0SSam Leffler if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 17704fa8d4efSDaniel Eischen if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1771517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 1772459bc4f0SSam Leffler } else 1773b032f27cSSam Leffler ieee80211_beacon_miss(ifp->if_l2com); 17745591b213SSam Leffler } 17755591b213SSam Leffler 1776724c193aSSam Leffler /* 1777b032f27cSSam Leffler * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1778b032f27cSSam Leffler * calcs together with WME. If necessary disable the crypto 1779b032f27cSSam Leffler * hardware and mark the 802.11 state so keys will be setup 1780b032f27cSSam Leffler * with the MIC work done in software. 
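 *
 * Concretely, on parts without the WME+TKIP MIC combination this
 * clears IEEE80211_CRYPTO_TKIPMIC from ic_cryptocaps while WME is
 * enabled, so net80211 computes the Michael MIC in software for
 * TKIP keys; with WME off the h/w MIC is advertised again.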
1781b032f27cSSam Leffler */ 1782b032f27cSSam Leffler static void 1783b032f27cSSam Leffler ath_settkipmic(struct ath_softc *sc) 1784b032f27cSSam Leffler { 1785b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 1786b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 1787b032f27cSSam Leffler 1788b032f27cSSam Leffler if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1789b032f27cSSam Leffler if (ic->ic_flags & IEEE80211_F_WME) { 1790b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1791b032f27cSSam Leffler ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1792b032f27cSSam Leffler } else { 1793b032f27cSSam Leffler ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1794b032f27cSSam Leffler ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1795b032f27cSSam Leffler } 1796b032f27cSSam Leffler } 1797b032f27cSSam Leffler } 1798b032f27cSSam Leffler 17995591b213SSam Leffler static void 18005591b213SSam Leffler ath_init(void *arg) 18015591b213SSam Leffler { 18025591b213SSam Leffler struct ath_softc *sc = (struct ath_softc *) arg; 1803fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 1804b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 18055591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 18065591b213SSam Leffler HAL_STATUS status; 18075591b213SSam Leffler 1808c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1809c42a7b7eSSam Leffler __func__, ifp->if_flags); 18105591b213SSam Leffler 1811f0b2a0beSSam Leffler ATH_LOCK(sc); 18125591b213SSam Leffler /* 18135591b213SSam Leffler * Stop anything previously setup. This is safe 18145591b213SSam Leffler * whether this is the first time through or not. 18155591b213SSam Leffler */ 1816c42a7b7eSSam Leffler ath_stop_locked(ifp); 18175591b213SSam Leffler 18185591b213SSam Leffler /* 18195591b213SSam Leffler * The basic interface to setting the hardware in a good 18205591b213SSam Leffler * state is ``reset''. On return the hardware is known to 18215591b213SSam Leffler * be powered up and with interrupts disabled. This must 18225591b213SSam Leffler * be followed by initialization of the appropriate bits 18235591b213SSam Leffler * and then setup of the interrupt mask. 18245591b213SSam Leffler */ 1825b032f27cSSam Leffler ath_settkipmic(sc); 182659efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 18275591b213SSam Leffler if_printf(ifp, "unable to reset hardware; hal status %u\n", 18285591b213SSam Leffler status); 1829b032f27cSSam Leffler ATH_UNLOCK(sc); 1830b032f27cSSam Leffler return; 18315591b213SSam Leffler } 1832b032f27cSSam Leffler ath_chan_change(sc, ic->ic_curchan); 18335591b213SSam Leffler 183448237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 183548237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan); 183648237774SAdrian Chadd 18375591b213SSam Leffler /* 1838c59005e9SSam Leffler * Likewise this is set during reset so update 1839c59005e9SSam Leffler * state cached in the driver. 
1840c59005e9SSam Leffler */ 1841c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 18422dc7fcc4SSam Leffler sc->sc_lastlongcal = 0; 18432dc7fcc4SSam Leffler sc->sc_resetcal = 1; 18442dc7fcc4SSam Leffler sc->sc_lastcalreset = 0; 1845a108ab63SAdrian Chadd sc->sc_lastani = 0; 1846a108ab63SAdrian Chadd sc->sc_lastshortcal = 0; 1847a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 18482fd9aabbSAdrian Chadd /* 18492fd9aabbSAdrian Chadd * Beacon timers were cleared here; give ath_newstate() 18502fd9aabbSAdrian Chadd * a hint that the beacon timers should be poked when 18512fd9aabbSAdrian Chadd * things transition to the RUN state. 18522fd9aabbSAdrian Chadd */ 18532fd9aabbSAdrian Chadd sc->sc_beacons = 0; 1854c42a7b7eSSam Leffler 1855c42a7b7eSSam Leffler /* 18565591b213SSam Leffler * Setup the hardware after reset: the key cache 18575591b213SSam Leffler * is filled as needed and the receive engine is 18585591b213SSam Leffler * set going. Frame transmit is handled entirely 18595591b213SSam Leffler * in the frame output path; there's nothing to do 18605591b213SSam Leffler * here except setup the interrupt mask. 18615591b213SSam Leffler */ 18625591b213SSam Leffler if (ath_startrecv(sc) != 0) { 18635591b213SSam Leffler if_printf(ifp, "unable to start recv logic\n"); 1864b032f27cSSam Leffler ATH_UNLOCK(sc); 1865b032f27cSSam Leffler return; 18665591b213SSam Leffler } 18675591b213SSam Leffler 18685591b213SSam Leffler /* 18695591b213SSam Leffler * Enable interrupts. 18705591b213SSam Leffler */ 18715591b213SSam Leffler sc->sc_imask = HAL_INT_RX | HAL_INT_TX 18725591b213SSam Leffler | HAL_INT_RXEOL | HAL_INT_RXORN 18735591b213SSam Leffler | HAL_INT_FATAL | HAL_INT_GLOBAL; 1874bcbb08ceSAdrian Chadd 1875bcbb08ceSAdrian Chadd /* 1876bcbb08ceSAdrian Chadd * Enable RX EDMA bits. Note these overlap with 1877bcbb08ceSAdrian Chadd * HAL_INT_RX and HAL_INT_RXDESC respectively. 1878bcbb08ceSAdrian Chadd */ 1879bcbb08ceSAdrian Chadd if (sc->sc_isedma) 1880bcbb08ceSAdrian Chadd sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 1881bcbb08ceSAdrian Chadd 1882c42a7b7eSSam Leffler /* 1883c42a7b7eSSam Leffler * Enable MIB interrupts when there are hardware phy counters. 1884c42a7b7eSSam Leffler * Note we only do this (at the moment) for station mode. 
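	 * (The MIB interrupt fires when the h/w phy error counters are
	 * close to overflowing; ath_intr() then calls ath_hal_mibevent()
	 * so the hal can fold them into its ANI statistics and clear
	 * the condition.)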
1885c42a7b7eSSam Leffler */ 1886c42a7b7eSSam Leffler if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1887c42a7b7eSSam Leffler sc->sc_imask |= HAL_INT_MIB; 18885591b213SSam Leffler 18895594f5c0SAdrian Chadd /* Enable global TX timeout and carrier sense timeout if available */ 18906ad02dbaSAdrian Chadd if (ath_hal_gtxto_supported(ah)) 18913788ebedSAdrian Chadd sc->sc_imask |= HAL_INT_GTT; 1892d0a0ebc6SAdrian Chadd 1893d0a0ebc6SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1894d0a0ebc6SAdrian Chadd __func__, sc->sc_imask); 18956ad02dbaSAdrian Chadd 189613f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 18972e986da5SSam Leffler callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1898b032f27cSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 18995591b213SSam Leffler 1900b032f27cSSam Leffler ATH_UNLOCK(sc); 1901b032f27cSSam Leffler 190286e07743SSam Leffler #ifdef ATH_TX99_DIAG 190386e07743SSam Leffler if (sc->sc_tx99 != NULL) 190486e07743SSam Leffler sc->sc_tx99->start(sc->sc_tx99); 190586e07743SSam Leffler else 190686e07743SSam Leffler #endif 1907b032f27cSSam Leffler ieee80211_start_all(ic); /* start all vap's */ 19085591b213SSam Leffler } 19095591b213SSam Leffler 19105591b213SSam Leffler static void 1911c42a7b7eSSam Leffler ath_stop_locked(struct ifnet *ifp) 19125591b213SSam Leffler { 19135591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 19145591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 19155591b213SSam Leffler 1916c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1917c42a7b7eSSam Leffler __func__, sc->sc_invalid, ifp->if_flags); 19185591b213SSam Leffler 1919c42a7b7eSSam Leffler ATH_LOCK_ASSERT(sc); 192013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 19215591b213SSam Leffler /* 19225591b213SSam Leffler * Shutdown the hardware and driver: 1923c42a7b7eSSam Leffler * reset 802.11 state machine 19245591b213SSam Leffler * turn off timers 1925c42a7b7eSSam Leffler * disable interrupts 1926c42a7b7eSSam Leffler * turn off the radio 19275591b213SSam Leffler * clear transmit machinery 19285591b213SSam Leffler * clear receive machinery 19295591b213SSam Leffler * drain and release tx queues 19305591b213SSam Leffler * reclaim beacon resources 19315591b213SSam Leffler * power down hardware 19325591b213SSam Leffler * 19335591b213SSam Leffler * Note that some of this work is not possible if the 19345591b213SSam Leffler * hardware is gone (invalid). 
19355591b213SSam Leffler */ 193686e07743SSam Leffler #ifdef ATH_TX99_DIAG 193786e07743SSam Leffler if (sc->sc_tx99 != NULL) 193886e07743SSam Leffler sc->sc_tx99->stop(sc->sc_tx99); 193986e07743SSam Leffler #endif 19402e986da5SSam Leffler callout_stop(&sc->sc_wd_ch); 19412e986da5SSam Leffler sc->sc_wd_timer = 0; 194213f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1943c42a7b7eSSam Leffler if (!sc->sc_invalid) { 19443e50ec2cSSam Leffler if (sc->sc_softled) { 19453e50ec2cSSam Leffler callout_stop(&sc->sc_ledtimer); 19463e50ec2cSSam Leffler ath_hal_gpioset(ah, sc->sc_ledpin, 19473e50ec2cSSam Leffler !sc->sc_ledon); 19483e50ec2cSSam Leffler sc->sc_blinking = 0; 19493e50ec2cSSam Leffler } 19505591b213SSam Leffler ath_hal_intrset(ah, 0); 1951c42a7b7eSSam Leffler } 1952517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_DEFAULT); 1953c42a7b7eSSam Leffler if (!sc->sc_invalid) { 19549a842e8bSAdrian Chadd ath_stoprecv(sc, 1); 1955c42a7b7eSSam Leffler ath_hal_phydisable(ah); 1956c42a7b7eSSam Leffler } else 19575591b213SSam Leffler sc->sc_rxlink = NULL; 1958b032f27cSSam Leffler ath_beacon_free(sc); /* XXX not needed */ 1959c42a7b7eSSam Leffler } 1960c42a7b7eSSam Leffler } 1961c42a7b7eSSam Leffler 1962ef27340cSAdrian Chadd #define MAX_TXRX_ITERATIONS 1000 1963ef27340cSAdrian Chadd static void 196421008bf1SAdrian Chadd ath_txrx_stop_locked(struct ath_softc *sc) 1965ef27340cSAdrian Chadd { 1966ef27340cSAdrian Chadd int i = MAX_TXRX_ITERATIONS; 1967ef27340cSAdrian Chadd 1968ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc); 196921008bf1SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 197021008bf1SAdrian Chadd 1971ef27340cSAdrian Chadd /* 1972ef27340cSAdrian Chadd * Sleep until all the pending operations have completed. 1973ef27340cSAdrian Chadd * 1974ef27340cSAdrian Chadd * The caller must ensure that reset has been incremented 1975ef27340cSAdrian Chadd * or the pending operations may continue being queued. 
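	 *
	 * The pattern used by ath_reset() below is roughly:
	 *
	 *	ATH_PCU_LOCK(sc);
	 *	ath_hal_intrset(sc->sc_ah, 0);
	 *	ath_txrx_stop_locked(sc);
	 *	ath_reset_grablock(sc, 1);
	 *	ATH_PCU_UNLOCK(sc);
	 *
	 * so that new TX/RX/interrupt work sees sc_inreset_cnt and
	 * backs off while the reset proper runs.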
1976ef27340cSAdrian Chadd */ 1977ef27340cSAdrian Chadd while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 1978ef27340cSAdrian Chadd sc->sc_txstart_cnt || sc->sc_intr_cnt) { 1979ef27340cSAdrian Chadd if (i <= 0) 1980ef27340cSAdrian Chadd break; 1981a2d8240dSAdrian Chadd msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1); 1982ef27340cSAdrian Chadd i--; 1983ef27340cSAdrian Chadd } 1984ef27340cSAdrian Chadd 1985ef27340cSAdrian Chadd if (i <= 0) 1986ef27340cSAdrian Chadd device_printf(sc->sc_dev, 1987ef27340cSAdrian Chadd "%s: didn't finish after %d iterations\n", 1988ef27340cSAdrian Chadd __func__, MAX_TXRX_ITERATIONS); 1989ef27340cSAdrian Chadd } 1990ef27340cSAdrian Chadd #undef MAX_TXRX_ITERATIONS 1991ef27340cSAdrian Chadd 1992e78719adSAdrian Chadd #if 0 1993ef27340cSAdrian Chadd static void 199421008bf1SAdrian Chadd ath_txrx_stop(struct ath_softc *sc) 199521008bf1SAdrian Chadd { 199621008bf1SAdrian Chadd ATH_UNLOCK_ASSERT(sc); 199721008bf1SAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 199821008bf1SAdrian Chadd 199921008bf1SAdrian Chadd ATH_PCU_LOCK(sc); 200021008bf1SAdrian Chadd ath_txrx_stop_locked(sc); 200121008bf1SAdrian Chadd ATH_PCU_UNLOCK(sc); 200221008bf1SAdrian Chadd } 2003e78719adSAdrian Chadd #endif 200421008bf1SAdrian Chadd 200521008bf1SAdrian Chadd static void 2006ef27340cSAdrian Chadd ath_txrx_start(struct ath_softc *sc) 2007ef27340cSAdrian Chadd { 2008ef27340cSAdrian Chadd 2009ef27340cSAdrian Chadd taskqueue_unblock(sc->sc_tq); 2010ef27340cSAdrian Chadd } 2011ef27340cSAdrian Chadd 2012ee321975SAdrian Chadd /* 2013ee321975SAdrian Chadd * Grab the reset lock, and wait around until noone else 2014ee321975SAdrian Chadd * is trying to do anything with it. 2015ee321975SAdrian Chadd * 2016ee321975SAdrian Chadd * This is totally horrible but we can't hold this lock for 2017ee321975SAdrian Chadd * long enough to do TX/RX or we end up with net80211/ip stack 2018ee321975SAdrian Chadd * LORs and eventual deadlock. 2019ee321975SAdrian Chadd * 2020ee321975SAdrian Chadd * "dowait" signals whether to spin, waiting for the reset 2021ee321975SAdrian Chadd * lock count to reach 0. This should (for now) only be used 2022ee321975SAdrian Chadd * during the reset path, as the rest of the code may not 2023ee321975SAdrian Chadd * be locking-reentrant enough to behave correctly. 2024ee321975SAdrian Chadd * 2025ee321975SAdrian Chadd * Another, cleaner way should be found to serialise all of 2026ee321975SAdrian Chadd * these operations. 
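 *
 * Note that sc_inreset_cnt is bumped below even when the exclusive
 * grab times out; ath_intr() and the TX/RX paths only care whether
 * the count is non-zero, so over-counting is preferred to letting
 * new work slip in during a reset.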
2027ee321975SAdrian Chadd */ 2028ee321975SAdrian Chadd #define MAX_RESET_ITERATIONS 10 2029ee321975SAdrian Chadd static int 2030ee321975SAdrian Chadd ath_reset_grablock(struct ath_softc *sc, int dowait) 2031ee321975SAdrian Chadd { 2032ee321975SAdrian Chadd int w = 0; 2033ee321975SAdrian Chadd int i = MAX_RESET_ITERATIONS; 2034ee321975SAdrian Chadd 2035ee321975SAdrian Chadd ATH_PCU_LOCK_ASSERT(sc); 2036ee321975SAdrian Chadd do { 2037ee321975SAdrian Chadd if (sc->sc_inreset_cnt == 0) { 2038ee321975SAdrian Chadd w = 1; 2039ee321975SAdrian Chadd break; 2040ee321975SAdrian Chadd } 2041ee321975SAdrian Chadd if (dowait == 0) { 2042ee321975SAdrian Chadd w = 0; 2043ee321975SAdrian Chadd break; 2044ee321975SAdrian Chadd } 2045ee321975SAdrian Chadd ATH_PCU_UNLOCK(sc); 2046ee321975SAdrian Chadd pause("ath_reset_grablock", 1); 2047ee321975SAdrian Chadd i--; 2048ee321975SAdrian Chadd ATH_PCU_LOCK(sc); 2049ee321975SAdrian Chadd } while (i > 0); 2050ee321975SAdrian Chadd 2051ee321975SAdrian Chadd /* 2052ee321975SAdrian Chadd * We always increment the refcounter, regardless 2053ee321975SAdrian Chadd * of whether we succeeded to get it in an exclusive 2054ee321975SAdrian Chadd * way. 2055ee321975SAdrian Chadd */ 2056ee321975SAdrian Chadd sc->sc_inreset_cnt++; 2057ee321975SAdrian Chadd 2058ee321975SAdrian Chadd if (i <= 0) 2059ee321975SAdrian Chadd device_printf(sc->sc_dev, 2060ee321975SAdrian Chadd "%s: didn't finish after %d iterations\n", 2061ee321975SAdrian Chadd __func__, MAX_RESET_ITERATIONS); 2062ee321975SAdrian Chadd 2063ee321975SAdrian Chadd if (w == 0) 2064ee321975SAdrian Chadd device_printf(sc->sc_dev, 2065ee321975SAdrian Chadd "%s: warning, recursive reset path!\n", 2066ee321975SAdrian Chadd __func__); 2067ee321975SAdrian Chadd 2068ee321975SAdrian Chadd return w; 2069ee321975SAdrian Chadd } 2070ee321975SAdrian Chadd #undef MAX_RESET_ITERATIONS 2071ee321975SAdrian Chadd 2072ee321975SAdrian Chadd /* 2073ee321975SAdrian Chadd * XXX TODO: write ath_reset_releaselock 2074ee321975SAdrian Chadd */ 2075ee321975SAdrian Chadd 2076c42a7b7eSSam Leffler static void 2077c42a7b7eSSam Leffler ath_stop(struct ifnet *ifp) 2078c42a7b7eSSam Leffler { 2079c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2080c42a7b7eSSam Leffler 2081c42a7b7eSSam Leffler ATH_LOCK(sc); 2082c42a7b7eSSam Leffler ath_stop_locked(ifp); 2083f0b2a0beSSam Leffler ATH_UNLOCK(sc); 20845591b213SSam Leffler } 20855591b213SSam Leffler 20865591b213SSam Leffler /* 20875591b213SSam Leffler * Reset the hardware w/o losing operational state. This is 20885591b213SSam Leffler * basically a more efficient way of doing ath_stop, ath_init, 20895591b213SSam Leffler * followed by state transitions to the current 802.11 2090c42a7b7eSSam Leffler * operational state. Used to recover from various errors and 2091c42a7b7eSSam Leffler * to reset or reload hardware state. 
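 *
 * The rough sequence below is: block the taskqueue, bump
 * sc_inreset_cnt, drain TX and stop/flush RX, ath_hal_reset(),
 * restart RX, reprogram beacons if they were running, then drop
 * the reset count and re-enable interrupts before restarting TX.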
20925591b213SSam Leffler */
20936079fdbeSAdrian Chadd int
2094517526efSAdrian Chadd ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
20955591b213SSam Leffler {
2096c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc;
2097b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com;
20985591b213SSam Leffler struct ath_hal *ah = sc->sc_ah;
20995591b213SSam Leffler HAL_STATUS status;
2100ef27340cSAdrian Chadd int i;
21015591b213SSam Leffler
2102f52d3452SAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
210316d4de92SAdrian Chadd
2104ee321975SAdrian Chadd /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2105ef27340cSAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc);
2106ef27340cSAdrian Chadd ATH_UNLOCK_ASSERT(sc);
2107ef27340cSAdrian Chadd
2108d52f7132SAdrian Chadd /* Try to stop any further TX/RX from occurring */
2109d52f7132SAdrian Chadd taskqueue_block(sc->sc_tq);
2110d52f7132SAdrian Chadd
2111ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
2112e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* disable interrupts */
2113e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2114ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) {
2115ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2116ef27340cSAdrian Chadd __func__);
2117ef27340cSAdrian Chadd }
2118ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
2119ef27340cSAdrian Chadd
2120f52d3452SAdrian Chadd /*
21219a842e8bSAdrian Chadd * Should now wait for pending TX/RX to complete
21229a842e8bSAdrian Chadd * and block future ones from occurring. This needs to be
21239a842e8bSAdrian Chadd * done before the TX queue is drained.
2124f52d3452SAdrian Chadd */
2125ef27340cSAdrian Chadd ath_draintxq(sc, reset_type); /* stop xmit side */
2126ef27340cSAdrian Chadd
2127ef27340cSAdrian Chadd /*
2128ef27340cSAdrian Chadd * Regardless of whether we're doing a no-loss flush or
2129ef27340cSAdrian Chadd * not, stop the PCU and handle what's in the RX queue.
2130ef27340cSAdrian Chadd * That way frames aren't dropped which shouldn't be.
2131ef27340cSAdrian Chadd */
21329a842e8bSAdrian Chadd ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2133f8cc9b09SAdrian Chadd ath_rx_flush(sc);
2134ef27340cSAdrian Chadd
2135b032f27cSSam Leffler ath_settkipmic(sc); /* configure TKIP MIC handling */
21365591b213SSam Leffler /* NB: indicate channel change so we do a full reset */
213759efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
21385591b213SSam Leffler if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
21395591b213SSam Leffler __func__, status);
2140c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah);
214148237774SAdrian Chadd
214248237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */
214348237774SAdrian Chadd ath_dfs_radar_enable(sc, ic->ic_curchan);
214448237774SAdrian Chadd
214568e8e04eSSam Leffler if (ath_startrecv(sc) != 0) /* restart recv */
214668e8e04eSSam Leffler if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2147c42a7b7eSSam Leffler /*
2148c42a7b7eSSam Leffler * We may be doing a reset in response to an ioctl
2149c42a7b7eSSam Leffler * that changes the channel so update any state that
2150c42a7b7eSSam Leffler * might change as a result.
2151c42a7b7eSSam Leffler */ 2152724c193aSSam Leffler ath_chan_change(sc, ic->ic_curchan); 2153c89b957aSSam Leffler if (sc->sc_beacons) { /* restart beacons */ 2154584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 215510ad9a77SSam Leffler if (sc->sc_tdma) 215610ad9a77SSam Leffler ath_tdma_config(sc, NULL); 215710ad9a77SSam Leffler else 215810ad9a77SSam Leffler #endif 2159c89b957aSSam Leffler ath_beacon_config(sc, NULL); 216010ad9a77SSam Leffler } 2161c42a7b7eSSam Leffler 2162ef27340cSAdrian Chadd /* 2163ef27340cSAdrian Chadd * Release the reset lock and re-enable interrupts here. 2164ef27340cSAdrian Chadd * If an interrupt was being processed in ath_intr(), 2165ef27340cSAdrian Chadd * it would disable interrupts at this point. So we have 2166ef27340cSAdrian Chadd * to atomically enable interrupts and decrement the 2167ef27340cSAdrian Chadd * reset counter - this way ath_intr() doesn't end up 2168ef27340cSAdrian Chadd * disabling interrupts without a corresponding enable 2169ef27340cSAdrian Chadd * in the rest or channel change path. 2170ef27340cSAdrian Chadd */ 2171ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2172ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 2173ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 2174ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 2175ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 2176ef27340cSAdrian Chadd 2177ef27340cSAdrian Chadd /* 2178ef27340cSAdrian Chadd * TX and RX can be started here. If it were started with 2179ef27340cSAdrian Chadd * sc_inreset_cnt > 0, the TX and RX path would abort. 2180ef27340cSAdrian Chadd * Thus if this is a nested call through the reset or 2181ef27340cSAdrian Chadd * channel change code, TX completion will occur but 2182ef27340cSAdrian Chadd * RX completion and ath_start / ath_tx_start will not 2183ef27340cSAdrian Chadd * run. 2184ef27340cSAdrian Chadd */ 2185ef27340cSAdrian Chadd 2186ef27340cSAdrian Chadd /* Restart TX/RX as needed */ 2187ef27340cSAdrian Chadd ath_txrx_start(sc); 2188ef27340cSAdrian Chadd 2189ef27340cSAdrian Chadd /* XXX Restart TX completion and pending TX */ 2190ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) { 2191ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2192ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 2193ef27340cSAdrian Chadd ATH_TXQ_LOCK(&sc->sc_txq[i]); 2194ef27340cSAdrian Chadd ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2195ef27340cSAdrian Chadd ath_txq_sched(sc, &sc->sc_txq[i]); 2196ef27340cSAdrian Chadd ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2197ef27340cSAdrian Chadd } 2198ef27340cSAdrian Chadd } 2199ef27340cSAdrian Chadd } 2200ef27340cSAdrian Chadd 2201ef27340cSAdrian Chadd /* 2202ef27340cSAdrian Chadd * This may have been set during an ath_start() call which 2203ef27340cSAdrian Chadd * set this once it detected a concurrent TX was going on. 2204ef27340cSAdrian Chadd * So, clear it. 2205ef27340cSAdrian Chadd */ 2206e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 2207ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2208e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 2209ef27340cSAdrian Chadd 2210ef27340cSAdrian Chadd /* Handle any frames in the TX queue */ 2211ef27340cSAdrian Chadd /* 2212ef27340cSAdrian Chadd * XXX should this be done by the caller, rather than 2213ef27340cSAdrian Chadd * ath_reset() ? 
2214ef27340cSAdrian Chadd */
2215c42a7b7eSSam Leffler ath_start(ifp); /* restart xmit */
2216c42a7b7eSSam Leffler return 0;
22175591b213SSam Leffler }
22185591b213SSam Leffler
221968e8e04eSSam Leffler static int
2220b032f27cSSam Leffler ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2221b032f27cSSam Leffler {
22224b54a231SSam Leffler struct ieee80211com *ic = vap->iv_ic;
22234b54a231SSam Leffler struct ifnet *ifp = ic->ic_ifp;
22244b54a231SSam Leffler struct ath_softc *sc = ifp->if_softc;
22254b54a231SSam Leffler struct ath_hal *ah = sc->sc_ah;
22264b54a231SSam Leffler
22274b54a231SSam Leffler switch (cmd) {
22284b54a231SSam Leffler case IEEE80211_IOC_TXPOWER:
22294b54a231SSam Leffler /*
22304b54a231SSam Leffler * If per-packet TPC is enabled, then we have nothing
22314b54a231SSam Leffler * to do; otherwise we need to force the global limit.
22324b54a231SSam Leffler * All this can happen directly; no need to reset.
22334b54a231SSam Leffler */
22344b54a231SSam Leffler if (!ath_hal_gettpc(ah))
22354b54a231SSam Leffler ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
22364b54a231SSam Leffler return 0;
22374b54a231SSam Leffler }
2238517526efSAdrian Chadd /* XXX? Full or NOLOSS? */
2239517526efSAdrian Chadd return ath_reset(ifp, ATH_RESET_FULL);
2240b032f27cSSam Leffler }
2241b032f27cSSam Leffler
2242b8e788a5SAdrian Chadd struct ath_buf *
2243af33d486SAdrian Chadd _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
224410ad9a77SSam Leffler {
224510ad9a77SSam Leffler struct ath_buf *bf;
224610ad9a77SSam Leffler
224710ad9a77SSam Leffler ATH_TXBUF_LOCK_ASSERT(sc);
224810ad9a77SSam Leffler
2249af33d486SAdrian Chadd if (btype == ATH_BUFTYPE_MGMT)
2250af33d486SAdrian Chadd bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2251af33d486SAdrian Chadd else
22526b349e5aSAdrian Chadd bf = TAILQ_FIRST(&sc->sc_txbuf);
2253af33d486SAdrian Chadd
2254e346b073SAdrian Chadd if (bf == NULL) {
2255e346b073SAdrian Chadd sc->sc_stats.ast_tx_getnobuf++;
2256e346b073SAdrian Chadd } else {
2257e346b073SAdrian Chadd if (bf->bf_flags & ATH_BUF_BUSY) {
2258e346b073SAdrian Chadd sc->sc_stats.ast_tx_getbusybuf++;
2259e346b073SAdrian Chadd bf = NULL;
2260e346b073SAdrian Chadd }
2261e346b073SAdrian Chadd }
2262e346b073SAdrian Chadd
2263af33d486SAdrian Chadd if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2264af33d486SAdrian Chadd if (btype == ATH_BUFTYPE_MGMT)
2265af33d486SAdrian Chadd TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
226623ced6c1SAdrian Chadd else {
2267af33d486SAdrian Chadd TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
226823ced6c1SAdrian Chadd sc->sc_txbuf_cnt--;
226923ced6c1SAdrian Chadd
227023ced6c1SAdrian Chadd /*
227123ced6c1SAdrian Chadd * This shouldn't happen; however just to be
227223ced6c1SAdrian Chadd * safe print a warning and fudge the txbuf
227323ced6c1SAdrian Chadd * count.
227423ced6c1SAdrian Chadd */
227523ced6c1SAdrian Chadd if (sc->sc_txbuf_cnt < 0) {
227623ced6c1SAdrian Chadd device_printf(sc->sc_dev,
227723ced6c1SAdrian Chadd "%s: sc_txbuf_cnt < 0?\n",
227823ced6c1SAdrian Chadd __func__);
227923ced6c1SAdrian Chadd sc->sc_txbuf_cnt = 0;
228023ced6c1SAdrian Chadd }
228123ced6c1SAdrian Chadd }
2282af33d486SAdrian Chadd } else
228310ad9a77SSam Leffler bf = NULL;
2284e346b073SAdrian Chadd
228510ad9a77SSam Leffler if (bf == NULL) {
2286af33d486SAdrian Chadd /* XXX should check which list, mgmt or otherwise */
228710ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
22886b349e5aSAdrian Chadd TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
228910ad9a77SSam Leffler "out of xmit buffers" : "xmit buffer busy"); 2290e346b073SAdrian Chadd return NULL; 229110ad9a77SSam Leffler } 2292e346b073SAdrian Chadd 2293af33d486SAdrian Chadd /* XXX TODO: should do this at buffer list initialisation */ 2294af33d486SAdrian Chadd /* XXX (then, ensure the buffer has the right flag set) */ 2295af33d486SAdrian Chadd if (btype == ATH_BUFTYPE_MGMT) 2296af33d486SAdrian Chadd bf->bf_flags |= ATH_BUF_MGMT; 2297af33d486SAdrian Chadd else 2298af33d486SAdrian Chadd bf->bf_flags &= (~ATH_BUF_MGMT); 2299af33d486SAdrian Chadd 2300e346b073SAdrian Chadd /* Valid bf here; clear some basic fields */ 2301e346b073SAdrian Chadd bf->bf_next = NULL; /* XXX just to be sure */ 2302e346b073SAdrian Chadd bf->bf_last = NULL; /* XXX again, just to be sure */ 2303e346b073SAdrian Chadd bf->bf_comp = NULL; /* XXX again, just to be sure */ 2304e346b073SAdrian Chadd bzero(&bf->bf_state, sizeof(bf->bf_state)); 2305e346b073SAdrian Chadd 230610ad9a77SSam Leffler return bf; 230710ad9a77SSam Leffler } 230810ad9a77SSam Leffler 2309e346b073SAdrian Chadd /* 2310e346b073SAdrian Chadd * When retrying a software frame, buffers marked ATH_BUF_BUSY 2311e346b073SAdrian Chadd * can't be thrown back on the queue as they could still be 2312e346b073SAdrian Chadd * in use by the hardware. 2313e346b073SAdrian Chadd * 2314e346b073SAdrian Chadd * This duplicates the buffer, or returns NULL. 2315e346b073SAdrian Chadd * 2316e346b073SAdrian Chadd * The descriptor is also copied but the link pointers and 2317e346b073SAdrian Chadd * the DMA segments aren't copied; this frame should thus 2318e346b073SAdrian Chadd * be again passed through the descriptor setup/chain routines 2319e346b073SAdrian Chadd * so the link is correct. 2320e346b073SAdrian Chadd * 2321e346b073SAdrian Chadd * The caller must free the buffer using ath_freebuf(). 2322e346b073SAdrian Chadd * 2323e346b073SAdrian Chadd * XXX TODO: this call shouldn't fail as it'll cause packet loss 2324e346b073SAdrian Chadd * XXX in the TX pathway when retries are needed. 2325e346b073SAdrian Chadd * XXX Figure out how to keep some buffers free, or factor the 2326e346b073SAdrian Chadd * XXX number of busy buffers into the xmit path (ath_start()) 2327e346b073SAdrian Chadd * XXX so we don't over-commit. 2328e346b073SAdrian Chadd */ 2329e346b073SAdrian Chadd struct ath_buf * 2330e346b073SAdrian Chadd ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2331e346b073SAdrian Chadd { 2332e346b073SAdrian Chadd struct ath_buf *tbf; 2333e346b073SAdrian Chadd 2334af33d486SAdrian Chadd tbf = ath_getbuf(sc, 2335af33d486SAdrian Chadd (bf->bf_flags & ATH_BUF_MGMT) ? 2336af33d486SAdrian Chadd ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2337e346b073SAdrian Chadd if (tbf == NULL) 2338e346b073SAdrian Chadd return NULL; /* XXX failure? Why? 
*/ 2339e346b073SAdrian Chadd 2340e346b073SAdrian Chadd /* Copy basics */ 2341e346b073SAdrian Chadd tbf->bf_next = NULL; 2342e346b073SAdrian Chadd tbf->bf_nseg = bf->bf_nseg; 2343e346b073SAdrian Chadd tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2344e346b073SAdrian Chadd tbf->bf_status = bf->bf_status; 2345e346b073SAdrian Chadd tbf->bf_m = bf->bf_m; 2346e346b073SAdrian Chadd tbf->bf_node = bf->bf_node; 2347e346b073SAdrian Chadd /* will be setup by the chain/setup function */ 2348e346b073SAdrian Chadd tbf->bf_lastds = NULL; 2349e346b073SAdrian Chadd /* for now, last == self */ 2350e346b073SAdrian Chadd tbf->bf_last = tbf; 2351e346b073SAdrian Chadd tbf->bf_comp = bf->bf_comp; 2352e346b073SAdrian Chadd 2353e346b073SAdrian Chadd /* NOTE: DMA segments will be setup by the setup/chain functions */ 2354e346b073SAdrian Chadd 2355e346b073SAdrian Chadd /* The caller has to re-init the descriptor + links */ 2356e346b073SAdrian Chadd 2357e346b073SAdrian Chadd /* Copy state */ 2358e346b073SAdrian Chadd memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2359e346b073SAdrian Chadd 2360e346b073SAdrian Chadd return tbf; 2361e346b073SAdrian Chadd } 2362e346b073SAdrian Chadd 2363b8e788a5SAdrian Chadd struct ath_buf * 2364af33d486SAdrian Chadd ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 236510ad9a77SSam Leffler { 236610ad9a77SSam Leffler struct ath_buf *bf; 236710ad9a77SSam Leffler 236810ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 2369af33d486SAdrian Chadd bf = _ath_getbuf_locked(sc, btype); 2370af33d486SAdrian Chadd /* 2371af33d486SAdrian Chadd * If a mgmt buffer was requested but we're out of those, 2372af33d486SAdrian Chadd * try requesting a normal one. 2373af33d486SAdrian Chadd */ 2374af33d486SAdrian Chadd if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2375af33d486SAdrian Chadd bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2376e4e7938aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 237710ad9a77SSam Leffler if (bf == NULL) { 237810ad9a77SSam Leffler struct ifnet *ifp = sc->sc_ifp; 237910ad9a77SSam Leffler 238010ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 238110ad9a77SSam Leffler sc->sc_stats.ast_tx_qstop++; 2382e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 238310ad9a77SSam Leffler ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2384e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 238510ad9a77SSam Leffler } 238610ad9a77SSam Leffler return bf; 238710ad9a77SSam Leffler } 238810ad9a77SSam Leffler 2389e60c4fc2SAdrian Chadd void 23905591b213SSam Leffler ath_start(struct ifnet *ifp) 23915591b213SSam Leffler { 23925591b213SSam Leffler struct ath_softc *sc = ifp->if_softc; 23935591b213SSam Leffler struct ieee80211_node *ni; 23945591b213SSam Leffler struct ath_buf *bf; 239568e8e04eSSam Leffler struct mbuf *m, *next; 239668e8e04eSSam Leffler ath_bufhead frags; 23975591b213SSam Leffler 239813f4c340SRobert Watson if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 23995591b213SSam Leffler return; 2400ef27340cSAdrian Chadd 2401ef27340cSAdrian Chadd /* XXX is it ok to hold the ATH_LOCK here? 
*/
2402ef27340cSAdrian Chadd ATH_PCU_LOCK(sc);
2403ef27340cSAdrian Chadd if (sc->sc_inreset_cnt > 0) {
2404ef27340cSAdrian Chadd device_printf(sc->sc_dev,
2405ef27340cSAdrian Chadd "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2406ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
2407e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd);
240823ced6c1SAdrian Chadd sc->sc_stats.ast_tx_qstop++;
2409e4e7938aSAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2410e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd);
2411ef27340cSAdrian Chadd return;
2412ef27340cSAdrian Chadd }
2413ef27340cSAdrian Chadd sc->sc_txstart_cnt++;
2414ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc);
2415ef27340cSAdrian Chadd
24165591b213SSam Leffler for (;;) {
241723ced6c1SAdrian Chadd ATH_TXBUF_LOCK(sc);
241823ced6c1SAdrian Chadd if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
241923ced6c1SAdrian Chadd /* XXX increment counter? */
242023ced6c1SAdrian Chadd ATH_TXBUF_UNLOCK(sc);
242123ced6c1SAdrian Chadd IF_LOCK(&ifp->if_snd);
242223ced6c1SAdrian Chadd ifp->if_drv_flags |= IFF_DRV_OACTIVE;
242323ced6c1SAdrian Chadd IF_UNLOCK(&ifp->if_snd);
242423ced6c1SAdrian Chadd break;
242523ced6c1SAdrian Chadd }
242623ced6c1SAdrian Chadd ATH_TXBUF_UNLOCK(sc);
242723ced6c1SAdrian Chadd
24285591b213SSam Leffler /*
24295591b213SSam Leffler * Grab a TX buffer and associated resources.
24305591b213SSam Leffler */
2431af33d486SAdrian Chadd bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
243210ad9a77SSam Leffler if (bf == NULL)
24335591b213SSam Leffler break;
24342b9411e2SSam Leffler
2435b032f27cSSam Leffler IFQ_DEQUEUE(&ifp->if_snd, m);
2436b032f27cSSam Leffler if (m == NULL) {
2437b032f27cSSam Leffler ATH_TXBUF_LOCK(sc);
2438e1a50456SAdrian Chadd ath_returnbuf_head(sc, bf);
2439b032f27cSSam Leffler ATH_TXBUF_UNLOCK(sc);
2440b032f27cSSam Leffler break;
2441b032f27cSSam Leffler }
2442b032f27cSSam Leffler ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
244368e8e04eSSam Leffler /*
244468e8e04eSSam Leffler * Check for fragmentation. If this frame
244568e8e04eSSam Leffler * has been broken up verify we have enough
244668e8e04eSSam Leffler * buffers to send all the fragments so all
244768e8e04eSSam Leffler * go out or none...
244868e8e04eSSam Leffler */
24496b349e5aSAdrian Chadd TAILQ_INIT(&frags);
245068e8e04eSSam Leffler if ((m->m_flags & M_FRAG) &&
245168e8e04eSSam Leffler !ath_txfrag_setup(sc, &frags, m, ni)) {
245268e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT,
245368e8e04eSSam Leffler "%s: out of txfrag buffers\n", __func__);
245436c6be9aSSam Leffler sc->sc_stats.ast_tx_nofrag++;
24559cb93076SSam Leffler ifp->if_oerrors++;
245668e8e04eSSam Leffler ath_freetx(m);
245768e8e04eSSam Leffler goto bad;
245868e8e04eSSam Leffler }
2459339ccfb3SSam Leffler ifp->if_opackets++;
246068e8e04eSSam Leffler nextfrag:
246168e8e04eSSam Leffler /*
246268e8e04eSSam Leffler * Pass the frame to the h/w for transmission.
246368e8e04eSSam Leffler * Fragmented frames have each frag chained together
246468e8e04eSSam Leffler * with m_nextpkt. We know there are sufficient ath_buf's
246568e8e04eSSam Leffler * to send all the frags because of work done by
246668e8e04eSSam Leffler * ath_txfrag_setup. We leave m_nextpkt set while
246768e8e04eSSam Leffler * calling ath_tx_start so it can use it to extend the
246868e8e04eSSam Leffler * tx duration to cover the subsequent frag and
246968e8e04eSSam Leffler * so it can reclaim all the mbufs in case of an error;
247068e8e04eSSam Leffler * ath_tx_start clears m_nextpkt once it commits to
247168e8e04eSSam Leffler * handing the frame to the hardware.
247268e8e04eSSam Leffler */ 247368e8e04eSSam Leffler next = m->m_nextpkt; 24745591b213SSam Leffler if (ath_tx_start(sc, ni, bf, m)) { 24755591b213SSam Leffler bad: 24765591b213SSam Leffler ifp->if_oerrors++; 2477c42a7b7eSSam Leffler reclaim: 247868e8e04eSSam Leffler bf->bf_m = NULL; 247968e8e04eSSam Leffler bf->bf_node = NULL; 2480c42a7b7eSSam Leffler ATH_TXBUF_LOCK(sc); 2481e1a50456SAdrian Chadd ath_returnbuf_head(sc, bf); 248268e8e04eSSam Leffler ath_txfrag_cleanup(sc, &frags, ni); 2483c42a7b7eSSam Leffler ATH_TXBUF_UNLOCK(sc); 2484c42a7b7eSSam Leffler if (ni != NULL) 2485c42a7b7eSSam Leffler ieee80211_free_node(ni); 24865591b213SSam Leffler continue; 24875591b213SSam Leffler } 248868e8e04eSSam Leffler if (next != NULL) { 248968e8e04eSSam Leffler /* 249068e8e04eSSam Leffler * Beware of state changing between frags. 249168e8e04eSSam Leffler * XXX check sta power-save state? 249268e8e04eSSam Leffler */ 2493b032f27cSSam Leffler if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 249468e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_XMIT, 249568e8e04eSSam Leffler "%s: flush fragmented packet, state %s\n", 249668e8e04eSSam Leffler __func__, 2497b032f27cSSam Leffler ieee80211_state_name[ni->ni_vap->iv_state]); 249868e8e04eSSam Leffler ath_freetx(next); 249968e8e04eSSam Leffler goto reclaim; 250068e8e04eSSam Leffler } 250168e8e04eSSam Leffler m = next; 25026b349e5aSAdrian Chadd bf = TAILQ_FIRST(&frags); 250368e8e04eSSam Leffler KASSERT(bf != NULL, ("no buf for txfrag")); 25046b349e5aSAdrian Chadd TAILQ_REMOVE(&frags, bf, bf_list); 250568e8e04eSSam Leffler goto nextfrag; 250668e8e04eSSam Leffler } 25075591b213SSam Leffler 25082e986da5SSam Leffler sc->sc_wd_timer = 5; 25095591b213SSam Leffler } 2510ef27340cSAdrian Chadd 2511ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 2512ef27340cSAdrian Chadd sc->sc_txstart_cnt--; 2513ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 25145591b213SSam Leffler } 25155591b213SSam Leffler 25165591b213SSam Leffler static int 25175591b213SSam Leffler ath_media_change(struct ifnet *ifp) 25185591b213SSam Leffler { 2519b032f27cSSam Leffler int error = ieee80211_media_change(ifp); 2520b032f27cSSam Leffler /* NB: only the fixed rate can change and that doesn't need a reset */ 2521b032f27cSSam Leffler return (error == ENETRESET ? 0 : error); 25225591b213SSam Leffler } 25235591b213SSam Leffler 2524c42a7b7eSSam Leffler /* 2525c42a7b7eSSam Leffler * Block/unblock tx+rx processing while a key change is done. 2526c42a7b7eSSam Leffler * We assume the caller serializes key management operations 2527c42a7b7eSSam Leffler * so we only need to worry about synchronization with other 2528c42a7b7eSSam Leffler * uses that originate in the driver. 
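 * Blocking is done by pausing the driver taskqueue and holding the
 * ifnet send queue lock (management frames are not blocked);
 * ath_key_update_end() undoes both steps.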
2529c42a7b7eSSam Leffler */ 2530c42a7b7eSSam Leffler static void 2531b032f27cSSam Leffler ath_key_update_begin(struct ieee80211vap *vap) 2532c42a7b7eSSam Leffler { 2533b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2534c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2535c42a7b7eSSam Leffler 2536c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2537b032f27cSSam Leffler taskqueue_block(sc->sc_tq); 2538c42a7b7eSSam Leffler IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2539c42a7b7eSSam Leffler } 2540c42a7b7eSSam Leffler 2541c42a7b7eSSam Leffler static void 2542b032f27cSSam Leffler ath_key_update_end(struct ieee80211vap *vap) 2543c42a7b7eSSam Leffler { 2544b032f27cSSam Leffler struct ifnet *ifp = vap->iv_ic->ic_ifp; 2545c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2546c42a7b7eSSam Leffler 2547c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2548c42a7b7eSSam Leffler IF_UNLOCK(&ifp->if_snd); 2549b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 2550c42a7b7eSSam Leffler } 25515591b213SSam Leffler 2552b032f27cSSam Leffler static void 2553b032f27cSSam Leffler ath_update_promisc(struct ifnet *ifp) 2554b032f27cSSam Leffler { 2555b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2556b032f27cSSam Leffler u_int32_t rfilt; 2557b032f27cSSam Leffler 2558b032f27cSSam Leffler /* configure rx filter */ 2559b032f27cSSam Leffler rfilt = ath_calcrxfilter(sc); 2560b032f27cSSam Leffler ath_hal_setrxfilter(sc->sc_ah, rfilt); 2561b032f27cSSam Leffler 2562b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2563b032f27cSSam Leffler } 2564b032f27cSSam Leffler 2565b032f27cSSam Leffler static void 2566b032f27cSSam Leffler ath_update_mcast(struct ifnet *ifp) 2567b032f27cSSam Leffler { 2568b032f27cSSam Leffler struct ath_softc *sc = ifp->if_softc; 2569b032f27cSSam Leffler u_int32_t mfilt[2]; 2570b032f27cSSam Leffler 2571b032f27cSSam Leffler /* calculate and install multicast filter */ 2572b032f27cSSam Leffler if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2573b032f27cSSam Leffler struct ifmultiaddr *ifma; 2574b032f27cSSam Leffler /* 2575b032f27cSSam Leffler * Merge multicast addresses to form the hardware filter. 2576b032f27cSSam Leffler */ 2577b032f27cSSam Leffler mfilt[0] = mfilt[1] = 0; 2578eb956cd0SRobert Watson if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
*/ 2579b032f27cSSam Leffler TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2580b032f27cSSam Leffler caddr_t dl; 2581b032f27cSSam Leffler u_int32_t val; 2582b032f27cSSam Leffler u_int8_t pos; 2583b032f27cSSam Leffler 2584b032f27cSSam Leffler /* calculate XOR of eight 6bit values */ 2585b032f27cSSam Leffler dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2586b032f27cSSam Leffler val = LE_READ_4(dl + 0); 2587b032f27cSSam Leffler pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2588b032f27cSSam Leffler val = LE_READ_4(dl + 3); 2589b032f27cSSam Leffler pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2590b032f27cSSam Leffler pos &= 0x3f; 2591b032f27cSSam Leffler mfilt[pos / 32] |= (1 << (pos % 32)); 2592b032f27cSSam Leffler } 2593eb956cd0SRobert Watson if_maddr_runlock(ifp); 2594b032f27cSSam Leffler } else 2595b032f27cSSam Leffler mfilt[0] = mfilt[1] = ~0; 2596b032f27cSSam Leffler ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2597b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2598b032f27cSSam Leffler __func__, mfilt[0], mfilt[1]); 25994bc0e754SSam Leffler } 26004bc0e754SSam Leffler 2601e60c4fc2SAdrian Chadd void 26025591b213SSam Leffler ath_mode_init(struct ath_softc *sc) 26035591b213SSam Leffler { 2604fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 2605b032f27cSSam Leffler struct ath_hal *ah = sc->sc_ah; 2606b032f27cSSam Leffler u_int32_t rfilt; 26075591b213SSam Leffler 26084bc0e754SSam Leffler /* configure rx filter */ 260968e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 26104bc0e754SSam Leffler ath_hal_setrxfilter(ah, rfilt); 26114bc0e754SSam Leffler 26125591b213SSam Leffler /* configure operational mode */ 2613c42a7b7eSSam Leffler ath_hal_setopmode(ah); 2614c42a7b7eSSam Leffler 26153d184db2SAdrian Chadd DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 26163d184db2SAdrian Chadd "%s: ah=%p, ifp=%p, if_addr=%p\n", 26173d184db2SAdrian Chadd __func__, 26183d184db2SAdrian Chadd ah, 26193d184db2SAdrian Chadd ifp, 26203d184db2SAdrian Chadd (ifp == NULL) ? NULL : ifp->if_addr); 26213d184db2SAdrian Chadd 262229aca940SSam Leffler /* handle any link-level address change */ 262329aca940SSam Leffler ath_hal_setmac(ah, IF_LLADDR(ifp)); 26245591b213SSam Leffler 26255591b213SSam Leffler /* calculate and install multicast filter */ 2626b032f27cSSam Leffler ath_update_mcast(ifp); 26275591b213SSam Leffler } 26285591b213SSam Leffler 2629c42a7b7eSSam Leffler /* 2630c42a7b7eSSam Leffler * Set the slot time based on the current setting. 
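 * Half- and quarter-rate channels use fixed 13us and 21us slots; on
 * 11g channels the short/long slot flag selects HAL_SLOT_TIME_9 or
 * HAL_SLOT_TIME_20, and all other channels use the 9us slot.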
2631c42a7b7eSSam Leffler */ 2632ba5c15d9SAdrian Chadd void 2633c42a7b7eSSam Leffler ath_setslottime(struct ath_softc *sc) 2634c42a7b7eSSam Leffler { 2635b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2636c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 2637aaa70f2fSSam Leffler u_int usec; 2638c42a7b7eSSam Leffler 2639aaa70f2fSSam Leffler if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2640aaa70f2fSSam Leffler usec = 13; 2641aaa70f2fSSam Leffler else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2642aaa70f2fSSam Leffler usec = 21; 2643724c193aSSam Leffler else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2644724c193aSSam Leffler /* honor short/long slot time only in 11g */ 2645724c193aSSam Leffler /* XXX shouldn't honor on pure g or turbo g channel */ 2646724c193aSSam Leffler if (ic->ic_flags & IEEE80211_F_SHSLOT) 2647aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_9; 2648aaa70f2fSSam Leffler else 2649aaa70f2fSSam Leffler usec = HAL_SLOT_TIME_20; 2650724c193aSSam Leffler } else 2651724c193aSSam Leffler usec = HAL_SLOT_TIME_9; 2652aaa70f2fSSam Leffler 2653aaa70f2fSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 2654aaa70f2fSSam Leffler "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2655aaa70f2fSSam Leffler __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2656aaa70f2fSSam Leffler ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2657aaa70f2fSSam Leffler 2658aaa70f2fSSam Leffler ath_hal_setslottime(ah, usec); 2659c42a7b7eSSam Leffler sc->sc_updateslot = OK; 2660c42a7b7eSSam Leffler } 2661c42a7b7eSSam Leffler 2662c42a7b7eSSam Leffler /* 2663c42a7b7eSSam Leffler * Callback from the 802.11 layer to update the 2664c42a7b7eSSam Leffler * slot time based on the current setting. 2665c42a7b7eSSam Leffler */ 2666c42a7b7eSSam Leffler static void 2667c42a7b7eSSam Leffler ath_updateslot(struct ifnet *ifp) 2668c42a7b7eSSam Leffler { 2669c42a7b7eSSam Leffler struct ath_softc *sc = ifp->if_softc; 2670b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 2671c42a7b7eSSam Leffler 2672c42a7b7eSSam Leffler /* 2673c42a7b7eSSam Leffler * When not coordinating the BSS, change the hardware 2674c42a7b7eSSam Leffler * immediately. For other operation we defer the change 2675c42a7b7eSSam Leffler * until beacon updates have propagated to the stations. 2676c42a7b7eSSam Leffler */ 267759aa14a9SRui Paulo if (ic->ic_opmode == IEEE80211_M_HOSTAP || 267859aa14a9SRui Paulo ic->ic_opmode == IEEE80211_M_MBSS) 2679c42a7b7eSSam Leffler sc->sc_updateslot = UPDATE; 2680c42a7b7eSSam Leffler else 2681c42a7b7eSSam Leffler ath_setslottime(sc); 2682c42a7b7eSSam Leffler } 2683c42a7b7eSSam Leffler 2684c42a7b7eSSam Leffler /* 2685622b3fd2SSam Leffler * Append the contents of src to dst; both queues 2686622b3fd2SSam Leffler * are assumed to be locked. 
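 * On return the source queue is left empty: its depth counters are
 * zeroed and its link pointer is cleared.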
2687622b3fd2SSam Leffler */
2688ba5c15d9SAdrian Chadd void
2689622b3fd2SSam Leffler ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2690622b3fd2SSam Leffler {
2691e86fd7a7SAdrian Chadd
2692e86fd7a7SAdrian Chadd ATH_TXQ_LOCK_ASSERT(dst);
2693e86fd7a7SAdrian Chadd ATH_TXQ_LOCK_ASSERT(src);
2694e86fd7a7SAdrian Chadd
26956b349e5aSAdrian Chadd TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2696622b3fd2SSam Leffler dst->axq_link = src->axq_link;
2697622b3fd2SSam Leffler src->axq_link = NULL;
2698622b3fd2SSam Leffler dst->axq_depth += src->axq_depth;
26996edf1dc7SAdrian Chadd dst->axq_aggr_depth += src->axq_aggr_depth;
2700622b3fd2SSam Leffler src->axq_depth = 0;
27016edf1dc7SAdrian Chadd src->axq_aggr_depth = 0;
2702622b3fd2SSam Leffler }
2703622b3fd2SSam Leffler
2704622b3fd2SSam Leffler /*
2705d52f7132SAdrian Chadd * Reset the hardware, with no loss.
2706d52f7132SAdrian Chadd *
2707d52f7132SAdrian Chadd * This can't be used for a general case reset.
2708d52f7132SAdrian Chadd */
2709d52f7132SAdrian Chadd static void
2710d52f7132SAdrian Chadd ath_reset_proc(void *arg, int pending)
2711d52f7132SAdrian Chadd {
2712d52f7132SAdrian Chadd struct ath_softc *sc = arg;
2713d52f7132SAdrian Chadd struct ifnet *ifp = sc->sc_ifp;
2714d52f7132SAdrian Chadd
2715d52f7132SAdrian Chadd #if 0
2716d52f7132SAdrian Chadd if_printf(ifp, "%s: resetting\n", __func__);
2717d52f7132SAdrian Chadd #endif
2718d52f7132SAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS);
2719d52f7132SAdrian Chadd }
2720d52f7132SAdrian Chadd
2721d52f7132SAdrian Chadd /*
2722c42a7b7eSSam Leffler * Reset the hardware after detecting beacons have stopped.
2723c42a7b7eSSam Leffler */
2724c42a7b7eSSam Leffler static void
2725c42a7b7eSSam Leffler ath_bstuck_proc(void *arg, int pending)
2726c42a7b7eSSam Leffler {
2727c42a7b7eSSam Leffler struct ath_softc *sc = arg;
2728fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp;
272916d4de92SAdrian Chadd uint32_t hangs = 0;
273016d4de92SAdrian Chadd
273116d4de92SAdrian Chadd if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
273216d4de92SAdrian Chadd if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
2733c42a7b7eSSam Leffler
2734c42a7b7eSSam Leffler if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2735c42a7b7eSSam Leffler sc->sc_bmisscount);
2736c2e34459SSam Leffler sc->sc_stats.ast_bstuck++;
273716d4de92SAdrian Chadd /*
273816d4de92SAdrian Chadd * This assumes that there's no simultaneous channel mode change
273916d4de92SAdrian Chadd * occurring.
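 * The reset is requested as ATH_RESET_NOLOSS, so frames pending in
 * the TX queues are restarted rather than dropped (see ath_reset()).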
274016d4de92SAdrian Chadd */ 2741517526efSAdrian Chadd ath_reset(ifp, ATH_RESET_NOLOSS); 2742c42a7b7eSSam Leffler } 2743c42a7b7eSSam Leffler 27445591b213SSam Leffler static void 27455591b213SSam Leffler ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 27465591b213SSam Leffler { 27475591b213SSam Leffler bus_addr_t *paddr = (bus_addr_t*) arg; 2748d77367bfSSam Leffler KASSERT(error == 0, ("error %u on bus_dma callback", error)); 27495591b213SSam Leffler *paddr = segs->ds_addr; 27505591b213SSam Leffler } 27515591b213SSam Leffler 27523d184db2SAdrian Chadd int 2753c42a7b7eSSam Leffler ath_descdma_setup(struct ath_softc *sc, 2754c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head, 2755c42a7b7eSSam Leffler const char *name, int nbuf, int ndesc) 2756c42a7b7eSSam Leffler { 2757c42a7b7eSSam Leffler #define DS2PHYS(_dd, _ds) \ 2758c42a7b7eSSam Leffler ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 275945abcd6cSAdrian Chadd #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 276045abcd6cSAdrian Chadd ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2761fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 276245abcd6cSAdrian Chadd uint8_t *ds; 2763c42a7b7eSSam Leffler struct ath_buf *bf; 2764c42a7b7eSSam Leffler int i, bsize, error; 276545abcd6cSAdrian Chadd int desc_len; 276645abcd6cSAdrian Chadd 276745abcd6cSAdrian Chadd desc_len = sizeof(struct ath_desc); 2768c42a7b7eSSam Leffler 2769c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2770c42a7b7eSSam Leffler __func__, name, nbuf, ndesc); 2771c42a7b7eSSam Leffler 2772c42a7b7eSSam Leffler dd->dd_name = name; 277345abcd6cSAdrian Chadd dd->dd_desc_len = desc_len * nbuf * ndesc; 277445abcd6cSAdrian Chadd 277545abcd6cSAdrian Chadd /* 277645abcd6cSAdrian Chadd * Merlin work-around: 277745abcd6cSAdrian Chadd * Descriptors that cross the 4KB boundary can't be used. 277845abcd6cSAdrian Chadd * Assume one skipped descriptor per 4KB page. 277945abcd6cSAdrian Chadd */ 278045abcd6cSAdrian Chadd if (! ath_hal_split4ktrans(sc->sc_ah)) { 278145abcd6cSAdrian Chadd int numdescpage = 4096 / (desc_len * ndesc); 278245abcd6cSAdrian Chadd dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 278345abcd6cSAdrian Chadd } 2784c42a7b7eSSam Leffler 2785c42a7b7eSSam Leffler /* 2786c42a7b7eSSam Leffler * Setup DMA descriptor area. 
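 * All descriptors live in a single physically contiguous,
 * PAGE_SIZE-aligned allocation restricted to 32-bit bus addresses.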
2787c42a7b7eSSam Leffler */ 2788c2175ff5SMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2789c42a7b7eSSam Leffler PAGE_SIZE, 0, /* alignment, bounds */ 2790c42a7b7eSSam Leffler BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2791c42a7b7eSSam Leffler BUS_SPACE_MAXADDR, /* highaddr */ 2792c42a7b7eSSam Leffler NULL, NULL, /* filter, filterarg */ 2793c42a7b7eSSam Leffler dd->dd_desc_len, /* maxsize */ 2794c42a7b7eSSam Leffler 1, /* nsegments */ 27956ccb8ea7SSam Leffler dd->dd_desc_len, /* maxsegsize */ 2796c42a7b7eSSam Leffler BUS_DMA_ALLOCNOW, /* flags */ 2797c42a7b7eSSam Leffler NULL, /* lockfunc */ 2798c42a7b7eSSam Leffler NULL, /* lockarg */ 2799c42a7b7eSSam Leffler &dd->dd_dmat); 2800c42a7b7eSSam Leffler if (error != 0) { 2801c42a7b7eSSam Leffler if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2802c42a7b7eSSam Leffler return error; 2803c42a7b7eSSam Leffler } 2804c42a7b7eSSam Leffler 2805c42a7b7eSSam Leffler /* allocate descriptors */ 2806c42a7b7eSSam Leffler error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2807c42a7b7eSSam Leffler if (error != 0) { 2808c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s descriptors, " 2809c42a7b7eSSam Leffler "error %u\n", dd->dd_name, error); 2810c42a7b7eSSam Leffler goto fail0; 2811c42a7b7eSSam Leffler } 2812c42a7b7eSSam Leffler 2813c42a7b7eSSam Leffler error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 28140553a01fSSam Leffler BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 28150553a01fSSam Leffler &dd->dd_dmamap); 2816c42a7b7eSSam Leffler if (error != 0) { 2817c42a7b7eSSam Leffler if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2818c42a7b7eSSam Leffler "error %u\n", nbuf * ndesc, dd->dd_name, error); 2819c42a7b7eSSam Leffler goto fail1; 2820c42a7b7eSSam Leffler } 2821c42a7b7eSSam Leffler 2822c42a7b7eSSam Leffler error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2823c42a7b7eSSam Leffler dd->dd_desc, dd->dd_desc_len, 2824c42a7b7eSSam Leffler ath_load_cb, &dd->dd_desc_paddr, 2825c42a7b7eSSam Leffler BUS_DMA_NOWAIT); 2826c42a7b7eSSam Leffler if (error != 0) { 2827c42a7b7eSSam Leffler if_printf(ifp, "unable to map %s descriptors, error %u\n", 2828c42a7b7eSSam Leffler dd->dd_name, error); 2829c42a7b7eSSam Leffler goto fail2; 2830c42a7b7eSSam Leffler } 2831c42a7b7eSSam Leffler 283245abcd6cSAdrian Chadd ds = (uint8_t *) dd->dd_desc; 2833c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2834c42a7b7eSSam Leffler __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2835c42a7b7eSSam Leffler (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2836c42a7b7eSSam Leffler 2837ebecf802SSam Leffler /* allocate rx buffers */ 2838c42a7b7eSSam Leffler bsize = sizeof(struct ath_buf) * nbuf; 2839c42a7b7eSSam Leffler bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2840c42a7b7eSSam Leffler if (bf == NULL) { 2841c42a7b7eSSam Leffler if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2842c42a7b7eSSam Leffler dd->dd_name, bsize); 2843c42a7b7eSSam Leffler goto fail3; 2844c42a7b7eSSam Leffler } 2845c42a7b7eSSam Leffler dd->dd_bufptr = bf; 2846c42a7b7eSSam Leffler 28476b349e5aSAdrian Chadd TAILQ_INIT(head); 284845abcd6cSAdrian Chadd for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 284945abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 2850c42a7b7eSSam Leffler bf->bf_daddr = DS2PHYS(dd, ds); 285145abcd6cSAdrian Chadd if (! 
ath_hal_split4ktrans(sc->sc_ah)) { 285245abcd6cSAdrian Chadd /* 285345abcd6cSAdrian Chadd * Merlin WAR: Skip descriptor addresses which 285445abcd6cSAdrian Chadd * cause 4KB boundary crossing along any point 285545abcd6cSAdrian Chadd * in the descriptor. 285645abcd6cSAdrian Chadd */ 285745abcd6cSAdrian Chadd if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 285845abcd6cSAdrian Chadd desc_len * ndesc)) { 285945abcd6cSAdrian Chadd /* Start at the next page */ 286045abcd6cSAdrian Chadd ds += 0x1000 - (bf->bf_daddr & 0xFFF); 286145abcd6cSAdrian Chadd bf->bf_desc = (struct ath_desc *) ds; 286245abcd6cSAdrian Chadd bf->bf_daddr = DS2PHYS(dd, ds); 286345abcd6cSAdrian Chadd } 286445abcd6cSAdrian Chadd } 2865c42a7b7eSSam Leffler error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2866c42a7b7eSSam Leffler &bf->bf_dmamap); 2867c42a7b7eSSam Leffler if (error != 0) { 2868c42a7b7eSSam Leffler if_printf(ifp, "unable to create dmamap for %s " 2869c42a7b7eSSam Leffler "buffer %u, error %u\n", dd->dd_name, i, error); 2870c42a7b7eSSam Leffler ath_descdma_cleanup(sc, dd, head); 2871c42a7b7eSSam Leffler return error; 2872c42a7b7eSSam Leffler } 28736edf1dc7SAdrian Chadd bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 28746b349e5aSAdrian Chadd TAILQ_INSERT_TAIL(head, bf, bf_list); 2875c42a7b7eSSam Leffler } 2876c42a7b7eSSam Leffler return 0; 2877c42a7b7eSSam Leffler fail3: 2878c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2879c42a7b7eSSam Leffler fail2: 2880c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2881c42a7b7eSSam Leffler fail1: 2882c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2883c42a7b7eSSam Leffler fail0: 2884c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 2885c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 2886c42a7b7eSSam Leffler return error; 2887c42a7b7eSSam Leffler #undef DS2PHYS 288845abcd6cSAdrian Chadd #undef ATH_DESC_4KB_BOUND_CHECK 2889c42a7b7eSSam Leffler } 2890c42a7b7eSSam Leffler 289139abbd9bSAdrian Chadd /* 289239abbd9bSAdrian Chadd * Allocate ath_buf entries but no descriptor contents. 289339abbd9bSAdrian Chadd * 289439abbd9bSAdrian Chadd * This is for RX EDMA where the descriptors are the header part of 289539abbd9bSAdrian Chadd * the RX buffer. 289639abbd9bSAdrian Chadd */ 289739abbd9bSAdrian Chadd int 289839abbd9bSAdrian Chadd ath_descdma_setup_rx_edma(struct ath_softc *sc, 289939abbd9bSAdrian Chadd struct ath_descdma *dd, ath_bufhead *head, 290039abbd9bSAdrian Chadd const char *name, int nbuf, int rx_status_len) 290139abbd9bSAdrian Chadd { 290239abbd9bSAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 290339abbd9bSAdrian Chadd struct ath_buf *bf; 290439abbd9bSAdrian Chadd int i, bsize, error; 290539abbd9bSAdrian Chadd 290639abbd9bSAdrian Chadd DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", 290739abbd9bSAdrian Chadd __func__, name, nbuf); 290839abbd9bSAdrian Chadd 290939abbd9bSAdrian Chadd dd->dd_name = name; 291039abbd9bSAdrian Chadd /* 291139abbd9bSAdrian Chadd * This is (mostly) purely for show. We're not allocating any actual 291239abbd9bSAdrian Chadd * descriptors here as EDMA RX has the descriptor be part 291339abbd9bSAdrian Chadd * of the RX buffer. 291439abbd9bSAdrian Chadd * 291539abbd9bSAdrian Chadd * However, dd_desc_len is used by ath_descdma_free() to determine 291639abbd9bSAdrian Chadd * whether we have already freed this DMA mapping. 
291739abbd9bSAdrian Chadd */ 291839abbd9bSAdrian Chadd dd->dd_desc_len = rx_status_len; 291939abbd9bSAdrian Chadd 292039abbd9bSAdrian Chadd /* allocate rx buffers */ 292139abbd9bSAdrian Chadd bsize = sizeof(struct ath_buf) * nbuf; 292239abbd9bSAdrian Chadd bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 292339abbd9bSAdrian Chadd if (bf == NULL) { 292439abbd9bSAdrian Chadd if_printf(ifp, "malloc of %s buffers failed, size %u\n", 292539abbd9bSAdrian Chadd dd->dd_name, bsize); 2926*b5b60f35SAdrian Chadd error = ENOMEM; 292739abbd9bSAdrian Chadd goto fail3; 292839abbd9bSAdrian Chadd } 292939abbd9bSAdrian Chadd dd->dd_bufptr = bf; 293039abbd9bSAdrian Chadd 293139abbd9bSAdrian Chadd TAILQ_INIT(head); 293239abbd9bSAdrian Chadd for (i = 0; i < nbuf; i++, bf++) { 293339abbd9bSAdrian Chadd bf->bf_desc = NULL; 293439abbd9bSAdrian Chadd bf->bf_daddr = 0; 293539abbd9bSAdrian Chadd bf->bf_lastds = NULL; /* Just an initial value */ 293639abbd9bSAdrian Chadd 293739abbd9bSAdrian Chadd error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 293839abbd9bSAdrian Chadd &bf->bf_dmamap); 293939abbd9bSAdrian Chadd if (error != 0) { 294039abbd9bSAdrian Chadd if_printf(ifp, "unable to create dmamap for %s " 294139abbd9bSAdrian Chadd "buffer %u, error %u\n", dd->dd_name, i, error); 294239abbd9bSAdrian Chadd ath_descdma_cleanup(sc, dd, head); 294339abbd9bSAdrian Chadd return error; 294439abbd9bSAdrian Chadd } 294539abbd9bSAdrian Chadd TAILQ_INSERT_TAIL(head, bf, bf_list); 294639abbd9bSAdrian Chadd } 294739abbd9bSAdrian Chadd return 0; 294839abbd9bSAdrian Chadd fail3: 294939abbd9bSAdrian Chadd memset(dd, 0, sizeof(*dd)); 295039abbd9bSAdrian Chadd return error; 295139abbd9bSAdrian Chadd } 295239abbd9bSAdrian Chadd 29533d184db2SAdrian Chadd void 2954c42a7b7eSSam Leffler ath_descdma_cleanup(struct ath_softc *sc, 2955c42a7b7eSSam Leffler struct ath_descdma *dd, ath_bufhead *head) 2956c42a7b7eSSam Leffler { 2957c42a7b7eSSam Leffler struct ath_buf *bf; 2958c42a7b7eSSam Leffler struct ieee80211_node *ni; 2959c42a7b7eSSam Leffler 29608d467c41SAdrian Chadd if (dd->dd_dmamap != 0) { 2961c42a7b7eSSam Leffler bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2962c42a7b7eSSam Leffler bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2963c42a7b7eSSam Leffler bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2964c42a7b7eSSam Leffler bus_dma_tag_destroy(dd->dd_dmat); 29658d467c41SAdrian Chadd } 2966c42a7b7eSSam Leffler 29676b349e5aSAdrian Chadd TAILQ_FOREACH(bf, head, bf_list) { 2968c42a7b7eSSam Leffler if (bf->bf_m) { 2969c42a7b7eSSam Leffler m_freem(bf->bf_m); 2970c42a7b7eSSam Leffler bf->bf_m = NULL; 2971c42a7b7eSSam Leffler } 2972c42a7b7eSSam Leffler if (bf->bf_dmamap != NULL) { 2973c42a7b7eSSam Leffler bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2974c42a7b7eSSam Leffler bf->bf_dmamap = NULL; 2975c42a7b7eSSam Leffler } 2976c42a7b7eSSam Leffler ni = bf->bf_node; 2977c42a7b7eSSam Leffler bf->bf_node = NULL; 2978c42a7b7eSSam Leffler if (ni != NULL) { 2979c42a7b7eSSam Leffler /* 2980c42a7b7eSSam Leffler * Reclaim node reference. 
2981c42a7b7eSSam Leffler */ 2982c42a7b7eSSam Leffler ieee80211_free_node(ni); 2983c42a7b7eSSam Leffler } 2984c42a7b7eSSam Leffler } 2985c42a7b7eSSam Leffler 29866b349e5aSAdrian Chadd TAILQ_INIT(head); 2987c42a7b7eSSam Leffler free(dd->dd_bufptr, M_ATHDEV); 2988c42a7b7eSSam Leffler memset(dd, 0, sizeof(*dd)); 2989c42a7b7eSSam Leffler } 2990c42a7b7eSSam Leffler 2991c42a7b7eSSam Leffler static int 29925591b213SSam Leffler ath_desc_alloc(struct ath_softc *sc) 29935591b213SSam Leffler { 2994c42a7b7eSSam Leffler int error; 29955591b213SSam Leffler 2996c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2997e2d787faSSam Leffler "tx", ath_txbuf, ATH_TXDESC); 2998c42a7b7eSSam Leffler if (error != 0) { 29995591b213SSam Leffler return error; 3000c42a7b7eSSam Leffler } 300123ced6c1SAdrian Chadd sc->sc_txbuf_cnt = ath_txbuf; 3002c42a7b7eSSam Leffler 3003af33d486SAdrian Chadd error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3004af33d486SAdrian Chadd "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC); 3005af33d486SAdrian Chadd if (error != 0) { 3006af33d486SAdrian Chadd ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3007af33d486SAdrian Chadd return error; 3008af33d486SAdrian Chadd } 3009af33d486SAdrian Chadd 3010af33d486SAdrian Chadd /* 3011af33d486SAdrian Chadd * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3012af33d486SAdrian Chadd * flag doesn't have to be set in ath_getbuf_locked(). 3013af33d486SAdrian Chadd */ 3014af33d486SAdrian Chadd 3015c42a7b7eSSam Leffler error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3016b032f27cSSam Leffler "beacon", ATH_BCBUF, 1); 3017c42a7b7eSSam Leffler if (error != 0) { 3018af33d486SAdrian Chadd ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3019af33d486SAdrian Chadd ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3020af33d486SAdrian Chadd &sc->sc_txbuf_mgmt); 3021c42a7b7eSSam Leffler return error; 3022c42a7b7eSSam Leffler } 30235591b213SSam Leffler return 0; 30245591b213SSam Leffler } 30255591b213SSam Leffler 30265591b213SSam Leffler static void 30275591b213SSam Leffler ath_desc_free(struct ath_softc *sc) 30285591b213SSam Leffler { 30295591b213SSam Leffler 3030c42a7b7eSSam Leffler if (sc->sc_bdma.dd_desc_len != 0) 3031c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3032c42a7b7eSSam Leffler if (sc->sc_txdma.dd_desc_len != 0) 3033c42a7b7eSSam Leffler ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3034af33d486SAdrian Chadd if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3035af33d486SAdrian Chadd ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3036af33d486SAdrian Chadd &sc->sc_txbuf_mgmt); 30375591b213SSam Leffler } 30385591b213SSam Leffler 30395591b213SSam Leffler static struct ieee80211_node * 304038c208f8SSam Leffler ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 30415591b213SSam Leffler { 304238c208f8SSam Leffler struct ieee80211com *ic = vap->iv_ic; 3043c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 3044c42a7b7eSSam Leffler const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3045c42a7b7eSSam Leffler struct ath_node *an; 3046c42a7b7eSSam Leffler 3047c42a7b7eSSam Leffler an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3048c42a7b7eSSam Leffler if (an == NULL) { 3049c42a7b7eSSam Leffler /* XXX stat+msg */ 3050de5af704SSam Leffler return NULL; 30515591b213SSam Leffler } 3052c42a7b7eSSam Leffler ath_rate_node_init(sc, an); 30535591b213SSam Leffler 30543dd85b26SAdrian Chadd /* Setup the mutex - there's no associd yet 
so set the name to NULL */ 30553dd85b26SAdrian Chadd snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 30563dd85b26SAdrian Chadd device_get_nameunit(sc->sc_dev), an); 30573dd85b26SAdrian Chadd mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 30583dd85b26SAdrian Chadd 3059eb6f0de0SAdrian Chadd /* XXX setup ath_tid */ 3060eb6f0de0SAdrian Chadd ath_tx_tid_init(sc, an); 3061eb6f0de0SAdrian Chadd 3062c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3063c42a7b7eSSam Leffler return &an->an_node; 3064c42a7b7eSSam Leffler } 3065c42a7b7eSSam Leffler 30665591b213SSam Leffler static void 30674afa805eSAdrian Chadd ath_node_cleanup(struct ieee80211_node *ni) 30684afa805eSAdrian Chadd { 30694afa805eSAdrian Chadd struct ieee80211com *ic = ni->ni_ic; 30704afa805eSAdrian Chadd struct ath_softc *sc = ic->ic_ifp->if_softc; 30714afa805eSAdrian Chadd 30724afa805eSAdrian Chadd /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3073eb6f0de0SAdrian Chadd ath_tx_node_flush(sc, ATH_NODE(ni)); 30744afa805eSAdrian Chadd ath_rate_node_cleanup(sc, ATH_NODE(ni)); 30754afa805eSAdrian Chadd sc->sc_node_cleanup(ni); 30764afa805eSAdrian Chadd } 30774afa805eSAdrian Chadd 30784afa805eSAdrian Chadd static void 3079c42a7b7eSSam Leffler ath_node_free(struct ieee80211_node *ni) 30805591b213SSam Leffler { 3081c42a7b7eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 3082c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 30831e774079SSam Leffler 3084c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 30853dd85b26SAdrian Chadd mtx_destroy(&ATH_NODE(ni)->an_mtx); 3086c42a7b7eSSam Leffler sc->sc_node_free(ni); 30875591b213SSam Leffler } 30885591b213SSam Leffler 308968e8e04eSSam Leffler static void 309068e8e04eSSam Leffler ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 309168e8e04eSSam Leffler { 309268e8e04eSSam Leffler struct ieee80211com *ic = ni->ni_ic; 309368e8e04eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 309468e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 309568e8e04eSSam Leffler 3096b032f27cSSam Leffler *rssi = ic->ic_node_getrssi(ni); 309759efa8b5SSam Leffler if (ni->ni_chan != IEEE80211_CHAN_ANYC) 309859efa8b5SSam Leffler *noise = ath_hal_getchannoise(ah, ni->ni_chan); 309959efa8b5SSam Leffler else 310068e8e04eSSam Leffler *noise = -95; /* nominally correct */ 310168e8e04eSSam Leffler } 310268e8e04eSSam Leffler 3103c42a7b7eSSam Leffler /* 3104c42a7b7eSSam Leffler * Set the default antenna. 
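 * The default-antenna switch statistic is only bumped when the
 * antenna actually changes; the rx-other-antenna counter is always
 * reset.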
3105c42a7b7eSSam Leffler */ 3106e60c4fc2SAdrian Chadd void 3107c42a7b7eSSam Leffler ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3108c42a7b7eSSam Leffler { 3109c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3110c42a7b7eSSam Leffler 3111c42a7b7eSSam Leffler /* XXX block beacon interrupts */ 3112c42a7b7eSSam Leffler ath_hal_setdefantenna(ah, antenna); 3113c42a7b7eSSam Leffler if (sc->sc_defant != antenna) 3114c42a7b7eSSam Leffler sc->sc_stats.ast_ant_defswitch++; 3115c42a7b7eSSam Leffler sc->sc_defant = antenna; 3116c42a7b7eSSam Leffler sc->sc_rxotherant = 0; 3117c42a7b7eSSam Leffler } 3118c42a7b7eSSam Leffler 31195463c4a4SSam Leffler static void 3120622b3fd2SSam Leffler ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3121622b3fd2SSam Leffler { 3122622b3fd2SSam Leffler txq->axq_qnum = qnum; 3123339ccfb3SSam Leffler txq->axq_ac = 0; 3124622b3fd2SSam Leffler txq->axq_depth = 0; 312516d4de92SAdrian Chadd txq->axq_aggr_depth = 0; 3126622b3fd2SSam Leffler txq->axq_intrcnt = 0; 3127622b3fd2SSam Leffler txq->axq_link = NULL; 31286b349e5aSAdrian Chadd txq->axq_softc = sc; 31296b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_q); 31306b349e5aSAdrian Chadd TAILQ_INIT(&txq->axq_tidq); 3131622b3fd2SSam Leffler ATH_TXQ_LOCK_INIT(sc, txq); 3132622b3fd2SSam Leffler } 3133622b3fd2SSam Leffler 31345591b213SSam Leffler /* 3135c42a7b7eSSam Leffler * Setup a h/w transmit queue. 31365591b213SSam Leffler */ 3137c42a7b7eSSam Leffler static struct ath_txq * 3138c42a7b7eSSam Leffler ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3139c42a7b7eSSam Leffler { 3140c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 3141c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3142c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 3143c42a7b7eSSam Leffler int qnum; 3144c42a7b7eSSam Leffler 3145c42a7b7eSSam Leffler memset(&qi, 0, sizeof(qi)); 3146c42a7b7eSSam Leffler qi.tqi_subtype = subtype; 3147c42a7b7eSSam Leffler qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3148c42a7b7eSSam Leffler qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3149c42a7b7eSSam Leffler qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3150c42a7b7eSSam Leffler /* 3151c42a7b7eSSam Leffler * Enable interrupts only for EOL and DESC conditions. 3152c42a7b7eSSam Leffler * We mark tx descriptors to receive a DESC interrupt 3153c42a7b7eSSam Leffler * when a tx queue gets deep; otherwise waiting for the 3154c42a7b7eSSam Leffler * EOL to reap descriptors. Note that this is done to 3155c42a7b7eSSam Leffler * reduce interrupt load and this only defers reaping 3156c42a7b7eSSam Leffler * descriptors, never transmitting frames. Aside from 3157c42a7b7eSSam Leffler * reducing interrupts this also permits more concurrency. 3158c42a7b7eSSam Leffler * The only potential downside is if the tx queue backs 3159c42a7b7eSSam Leffler * up in which case the top half of the kernel may backup 3160c42a7b7eSSam Leffler * due to a lack of tx descriptors. 
3161c42a7b7eSSam Leffler */ 3162bd5a9920SSam Leffler qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3163c42a7b7eSSam Leffler qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3164c42a7b7eSSam Leffler if (qnum == -1) { 3165c42a7b7eSSam Leffler /* 3166c42a7b7eSSam Leffler * NB: don't print a message, this happens 3167a614e076SSam Leffler * normally on parts with too few tx queues 3168c42a7b7eSSam Leffler */ 3169c42a7b7eSSam Leffler return NULL; 3170c42a7b7eSSam Leffler } 3171c42a7b7eSSam Leffler if (qnum >= N(sc->sc_txq)) { 31726891c875SPeter Wemm device_printf(sc->sc_dev, 31736891c875SPeter Wemm "hal qnum %u out of range, max %zu!\n", 3174c42a7b7eSSam Leffler qnum, N(sc->sc_txq)); 3175c42a7b7eSSam Leffler ath_hal_releasetxqueue(ah, qnum); 3176c42a7b7eSSam Leffler return NULL; 3177c42a7b7eSSam Leffler } 3178c42a7b7eSSam Leffler if (!ATH_TXQ_SETUP(sc, qnum)) { 3179622b3fd2SSam Leffler ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3180c42a7b7eSSam Leffler sc->sc_txqsetup |= 1<<qnum; 3181c42a7b7eSSam Leffler } 3182c42a7b7eSSam Leffler return &sc->sc_txq[qnum]; 3183c42a7b7eSSam Leffler #undef N 3184c42a7b7eSSam Leffler } 3185c42a7b7eSSam Leffler 3186c42a7b7eSSam Leffler /* 3187c42a7b7eSSam Leffler * Setup a hardware data transmit queue for the specified 3188c42a7b7eSSam Leffler * access control. The hal may not support all requested 3189c42a7b7eSSam Leffler * queues in which case it will return a reference to a 3190c42a7b7eSSam Leffler * previously setup queue. We record the mapping from ac's 3191c42a7b7eSSam Leffler * to h/w queues for use by ath_tx_start and also track 3192c42a7b7eSSam Leffler * the set of h/w queues being used to optimize work in the 3193c42a7b7eSSam Leffler * transmit interrupt handler and related routines. 3194c42a7b7eSSam Leffler */ 3195c42a7b7eSSam Leffler static int 3196c42a7b7eSSam Leffler ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3197c42a7b7eSSam Leffler { 3198c42a7b7eSSam Leffler #define N(a) (sizeof(a)/sizeof(a[0])) 3199c42a7b7eSSam Leffler struct ath_txq *txq; 3200c42a7b7eSSam Leffler 3201c42a7b7eSSam Leffler if (ac >= N(sc->sc_ac2q)) { 32026891c875SPeter Wemm device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3203c42a7b7eSSam Leffler ac, N(sc->sc_ac2q)); 3204c42a7b7eSSam Leffler return 0; 3205c42a7b7eSSam Leffler } 3206c42a7b7eSSam Leffler txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3207c42a7b7eSSam Leffler if (txq != NULL) { 3208339ccfb3SSam Leffler txq->axq_ac = ac; 3209c42a7b7eSSam Leffler sc->sc_ac2q[ac] = txq; 3210c42a7b7eSSam Leffler return 1; 3211c42a7b7eSSam Leffler } else 3212c42a7b7eSSam Leffler return 0; 3213c42a7b7eSSam Leffler #undef N 3214c42a7b7eSSam Leffler } 3215c42a7b7eSSam Leffler 3216c42a7b7eSSam Leffler /* 3217c42a7b7eSSam Leffler * Update WME parameters for a transmit queue. 
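 * The WME CW exponents are converted to linear values via (1<<v)-1
 * and the TXOP limit (in 32us units) is converted to microseconds;
 * e.g. a logcwmin of 4 becomes a cwmin of 15, and a txopLimit of 94
 * becomes 3008us.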
3218c42a7b7eSSam Leffler */ 3219c42a7b7eSSam Leffler static int 3220c42a7b7eSSam Leffler ath_txq_update(struct ath_softc *sc, int ac) 3221c42a7b7eSSam Leffler { 3222c42a7b7eSSam Leffler #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3223c42a7b7eSSam Leffler #define ATH_TXOP_TO_US(v) (v<<5) 3224b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 3225b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 3226c42a7b7eSSam Leffler struct ath_txq *txq = sc->sc_ac2q[ac]; 3227c42a7b7eSSam Leffler struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3228c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3229c42a7b7eSSam Leffler HAL_TXQ_INFO qi; 3230c42a7b7eSSam Leffler 3231c42a7b7eSSam Leffler ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3232584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 323310ad9a77SSam Leffler if (sc->sc_tdma) { 323410ad9a77SSam Leffler /* 323510ad9a77SSam Leffler * AIFS is zero so there's no pre-transmit wait. The 323610ad9a77SSam Leffler * burst time defines the slot duration and is configured 323709be6601SSam Leffler * through net80211. The QCU is setup to not do post-xmit 323810ad9a77SSam Leffler * back off, lockout all lower-priority QCU's, and fire 323910ad9a77SSam Leffler * off the DMA beacon alert timer which is setup based 324010ad9a77SSam Leffler * on the slot configuration. 324110ad9a77SSam Leffler */ 324210ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 324310ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 324410ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 324510ad9a77SSam Leffler | HAL_TXQ_TXEOLINT_ENABLE 324610ad9a77SSam Leffler | HAL_TXQ_DBA_GATED 324710ad9a77SSam Leffler | HAL_TXQ_BACKOFF_DISABLE 324810ad9a77SSam Leffler | HAL_TXQ_ARB_LOCKOUT_GLOBAL 324910ad9a77SSam Leffler ; 325010ad9a77SSam Leffler qi.tqi_aifs = 0; 325110ad9a77SSam Leffler /* XXX +dbaprep? */ 325210ad9a77SSam Leffler qi.tqi_readyTime = sc->sc_tdmaslotlen; 325310ad9a77SSam Leffler qi.tqi_burstTime = qi.tqi_readyTime; 325410ad9a77SSam Leffler } else { 325510ad9a77SSam Leffler #endif 325616d4de92SAdrian Chadd /* 325716d4de92SAdrian Chadd * XXX shouldn't this just use the default flags 325816d4de92SAdrian Chadd * used in the previous queue setup? 
325916d4de92SAdrian Chadd */ 326010ad9a77SSam Leffler qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 326110ad9a77SSam Leffler | HAL_TXQ_TXERRINT_ENABLE 326210ad9a77SSam Leffler | HAL_TXQ_TXDESCINT_ENABLE 326310ad9a77SSam Leffler | HAL_TXQ_TXURNINT_ENABLE 32641f25c0f7SAdrian Chadd | HAL_TXQ_TXEOLINT_ENABLE 326510ad9a77SSam Leffler ; 3266c42a7b7eSSam Leffler qi.tqi_aifs = wmep->wmep_aifsn; 3267c42a7b7eSSam Leffler qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3268c42a7b7eSSam Leffler qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 326910ad9a77SSam Leffler qi.tqi_readyTime = 0; 3270c42a7b7eSSam Leffler qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3271584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 327210ad9a77SSam Leffler } 327310ad9a77SSam Leffler #endif 327410ad9a77SSam Leffler 327510ad9a77SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, 327610ad9a77SSam Leffler "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 327710ad9a77SSam Leffler __func__, txq->axq_qnum, qi.tqi_qflags, 327810ad9a77SSam Leffler qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3279c42a7b7eSSam Leffler 3280c42a7b7eSSam Leffler if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3281b032f27cSSam Leffler if_printf(ifp, "unable to update hardware queue " 3282c42a7b7eSSam Leffler "parameters for %s traffic!\n", 3283c42a7b7eSSam Leffler ieee80211_wme_acnames[ac]); 3284c42a7b7eSSam Leffler return 0; 3285c42a7b7eSSam Leffler } else { 3286c42a7b7eSSam Leffler ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3287c42a7b7eSSam Leffler return 1; 3288c42a7b7eSSam Leffler } 3289c42a7b7eSSam Leffler #undef ATH_TXOP_TO_US 3290c42a7b7eSSam Leffler #undef ATH_EXPONENT_TO_VALUE 3291c42a7b7eSSam Leffler } 3292c42a7b7eSSam Leffler 3293c42a7b7eSSam Leffler /* 3294c42a7b7eSSam Leffler * Callback from the 802.11 layer to update WME parameters. 3295c42a7b7eSSam Leffler */ 3296a35dae8dSAdrian Chadd int 3297c42a7b7eSSam Leffler ath_wme_update(struct ieee80211com *ic) 3298c42a7b7eSSam Leffler { 3299c42a7b7eSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 3300c42a7b7eSSam Leffler 3301c42a7b7eSSam Leffler return !ath_txq_update(sc, WME_AC_BE) || 3302c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_BK) || 3303c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VI) || 3304c42a7b7eSSam Leffler !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3305c42a7b7eSSam Leffler } 3306c42a7b7eSSam Leffler 3307c42a7b7eSSam Leffler /* 3308c42a7b7eSSam Leffler * Reclaim resources for a setup queue. 3309c42a7b7eSSam Leffler */ 3310c42a7b7eSSam Leffler static void 3311c42a7b7eSSam Leffler ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3312c42a7b7eSSam Leffler { 3313c42a7b7eSSam Leffler 3314c42a7b7eSSam Leffler ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3315c42a7b7eSSam Leffler ATH_TXQ_LOCK_DESTROY(txq); 3316c42a7b7eSSam Leffler sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3317c42a7b7eSSam Leffler } 3318c42a7b7eSSam Leffler 3319c42a7b7eSSam Leffler /* 3320c42a7b7eSSam Leffler * Reclaim all tx queue resources. 
3321c42a7b7eSSam Leffler */ 3322c42a7b7eSSam Leffler static void 3323c42a7b7eSSam Leffler ath_tx_cleanup(struct ath_softc *sc) 3324c42a7b7eSSam Leffler { 3325c42a7b7eSSam Leffler int i; 3326c42a7b7eSSam Leffler 3327c42a7b7eSSam Leffler ATH_TXBUF_LOCK_DESTROY(sc); 3328c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3329c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 3330c42a7b7eSSam Leffler ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3331c42a7b7eSSam Leffler } 33325591b213SSam Leffler 333399d258fdSSam Leffler /* 3334ab06fdf2SSam Leffler * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3335ab06fdf2SSam Leffler * using the current rates in sc_rixmap. 33368b5341deSSam Leffler */ 3337b8e788a5SAdrian Chadd int 3338ab06fdf2SSam Leffler ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 33398b5341deSSam Leffler { 3340ab06fdf2SSam Leffler int rix = sc->sc_rixmap[rate]; 3341ab06fdf2SSam Leffler /* NB: return lowest rix for invalid rate */ 3342ab06fdf2SSam Leffler return (rix == 0xff ? 0 : rix); 33438b5341deSSam Leffler } 33448b5341deSSam Leffler 33459352fb7aSAdrian Chadd static void 33469352fb7aSAdrian Chadd ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 33479352fb7aSAdrian Chadd struct ath_buf *bf) 33489352fb7aSAdrian Chadd { 33499352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 33509352fb7aSAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 33519352fb7aSAdrian Chadd struct ieee80211com *ic = ifp->if_l2com; 33529352fb7aSAdrian Chadd int sr, lr, pri; 33539352fb7aSAdrian Chadd 33549352fb7aSAdrian Chadd if (ts->ts_status == 0) { 33559352fb7aSAdrian Chadd u_int8_t txant = ts->ts_antenna; 33569352fb7aSAdrian Chadd sc->sc_stats.ast_ant_tx[txant]++; 33579352fb7aSAdrian Chadd sc->sc_ant_tx[txant]++; 33589352fb7aSAdrian Chadd if (ts->ts_finaltsi != 0) 33599352fb7aSAdrian Chadd sc->sc_stats.ast_tx_altrate++; 33609352fb7aSAdrian Chadd pri = M_WME_GETAC(bf->bf_m); 33619352fb7aSAdrian Chadd if (pri >= WME_AC_VO) 33629352fb7aSAdrian Chadd ic->ic_wme.wme_hipri_traffic++; 3363875a9451SAdrian Chadd if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 33649352fb7aSAdrian Chadd ni->ni_inact = ni->ni_inact_reload; 33659352fb7aSAdrian Chadd } else { 33669352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XRETRY) 33679352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xretries++; 33689352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FIFO) 33699352fb7aSAdrian Chadd sc->sc_stats.ast_tx_fifoerr++; 33709352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_FILT) 33719352fb7aSAdrian Chadd sc->sc_stats.ast_tx_filtered++; 33729352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_XTXOP) 33739352fb7aSAdrian Chadd sc->sc_stats.ast_tx_xtxop++; 33749352fb7aSAdrian Chadd if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 33759352fb7aSAdrian Chadd sc->sc_stats.ast_tx_timerexpired++; 33769352fb7aSAdrian Chadd 33779352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 33789352fb7aSAdrian Chadd sc->sc_stats.ast_tx_data_underrun++; 33799352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 33809352fb7aSAdrian Chadd sc->sc_stats.ast_tx_delim_underrun++; 33819352fb7aSAdrian Chadd 33829352fb7aSAdrian Chadd if (bf->bf_m->m_flags & M_FF) 33839352fb7aSAdrian Chadd sc->sc_stats.ast_ff_txerr++; 33849352fb7aSAdrian Chadd } 33859352fb7aSAdrian Chadd /* XXX when is this valid? 
*/ 33869352fb7aSAdrian Chadd if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 33879352fb7aSAdrian Chadd sc->sc_stats.ast_tx_desccfgerr++; 33889352fb7aSAdrian Chadd 33899352fb7aSAdrian Chadd sr = ts->ts_shortretry; 33909352fb7aSAdrian Chadd lr = ts->ts_longretry; 33919352fb7aSAdrian Chadd sc->sc_stats.ast_tx_shortretry += sr; 33929352fb7aSAdrian Chadd sc->sc_stats.ast_tx_longretry += lr; 33939352fb7aSAdrian Chadd 33949352fb7aSAdrian Chadd } 33959352fb7aSAdrian Chadd 33969352fb7aSAdrian Chadd /* 33979352fb7aSAdrian Chadd * The default completion. If fail is 1, this means 33989352fb7aSAdrian Chadd * "please don't retry the frame, and just return -1 status 33999352fb7aSAdrian Chadd * to the net80211 stack. 34009352fb7aSAdrian Chadd */ 34019352fb7aSAdrian Chadd void 34029352fb7aSAdrian Chadd ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 34039352fb7aSAdrian Chadd { 34049352fb7aSAdrian Chadd struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 34059352fb7aSAdrian Chadd int st; 34069352fb7aSAdrian Chadd 34079352fb7aSAdrian Chadd if (fail == 1) 34089352fb7aSAdrian Chadd st = -1; 34099352fb7aSAdrian Chadd else 3410875a9451SAdrian Chadd st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 34119352fb7aSAdrian Chadd ts->ts_status : HAL_TXERR_XRETRY; 34129352fb7aSAdrian Chadd 34139352fb7aSAdrian Chadd if (bf->bf_state.bfs_dobaw) 34149352fb7aSAdrian Chadd device_printf(sc->sc_dev, 3415a66d5089SAdrian Chadd "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 3416a66d5089SAdrian Chadd __func__, 3417a66d5089SAdrian Chadd bf, 3418a66d5089SAdrian Chadd SEQNO(bf->bf_state.bfs_seqno)); 34199352fb7aSAdrian Chadd if (bf->bf_next != NULL) 34209352fb7aSAdrian Chadd device_printf(sc->sc_dev, 3421a66d5089SAdrian Chadd "%s: bf %p: seqno %d: bf_next not NULL!\n", 3422a66d5089SAdrian Chadd __func__, 3423a66d5089SAdrian Chadd bf, 3424a66d5089SAdrian Chadd SEQNO(bf->bf_state.bfs_seqno)); 34259352fb7aSAdrian Chadd 34269352fb7aSAdrian Chadd /* 34279352fb7aSAdrian Chadd * Do any tx complete callback. Note this must 34289352fb7aSAdrian Chadd * be done before releasing the node reference. 34299352fb7aSAdrian Chadd * This will free the mbuf, release the net80211 34309352fb7aSAdrian Chadd * node and recycle the ath_buf. 34319352fb7aSAdrian Chadd */ 34329352fb7aSAdrian Chadd ath_tx_freebuf(sc, bf, st); 34339352fb7aSAdrian Chadd } 34349352fb7aSAdrian Chadd 34359352fb7aSAdrian Chadd /* 3436eb6f0de0SAdrian Chadd * Update rate control with the given completion status. 
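 * Only unicast, non-filtered completions are fed to the rate module,
 * so it never sees frames that were filtered out or that had no
 * destination node.  A non-aggregate completion hands it one frame at
 * a time, along the lines of the call made from ath_tx_processq()
 * below:
 *
 *	ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
 *	    &bf->bf_status.ds_txstat, bf->bf_state.bfs_pktlen,
 *	    1, ts->ts_status == 0 ? 0 : 1);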
3437eb6f0de0SAdrian Chadd */ 3438eb6f0de0SAdrian Chadd void 3439eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3440eb6f0de0SAdrian Chadd struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3441eb6f0de0SAdrian Chadd int nframes, int nbad) 3442eb6f0de0SAdrian Chadd { 3443eb6f0de0SAdrian Chadd struct ath_node *an; 3444eb6f0de0SAdrian Chadd 3445eb6f0de0SAdrian Chadd /* Only for unicast frames */ 3446eb6f0de0SAdrian Chadd if (ni == NULL) 3447eb6f0de0SAdrian Chadd return; 3448eb6f0de0SAdrian Chadd 3449eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 3450eb6f0de0SAdrian Chadd 3451eb6f0de0SAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3452eb6f0de0SAdrian Chadd ATH_NODE_LOCK(an); 3453eb6f0de0SAdrian Chadd ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3454eb6f0de0SAdrian Chadd ATH_NODE_UNLOCK(an); 3455eb6f0de0SAdrian Chadd } 3456eb6f0de0SAdrian Chadd } 3457eb6f0de0SAdrian Chadd 3458eb6f0de0SAdrian Chadd /* 34599352fb7aSAdrian Chadd * Update the busy status of the last frame on the free list. 34609352fb7aSAdrian Chadd * When doing TDMA, the busy flag tracks whether the hardware 34619352fb7aSAdrian Chadd * currently points to this buffer or not, and thus gated DMA 34629352fb7aSAdrian Chadd * may restart by re-reading the last descriptor in this 34639352fb7aSAdrian Chadd * buffer. 34649352fb7aSAdrian Chadd * 34659352fb7aSAdrian Chadd * This should be called in the completion function once one 34669352fb7aSAdrian Chadd * of the buffers has been used. 34679352fb7aSAdrian Chadd */ 34689352fb7aSAdrian Chadd static void 34699352fb7aSAdrian Chadd ath_tx_update_busy(struct ath_softc *sc) 34709352fb7aSAdrian Chadd { 34719352fb7aSAdrian Chadd struct ath_buf *last; 34729352fb7aSAdrian Chadd 34739352fb7aSAdrian Chadd /* 34749352fb7aSAdrian Chadd * Since the last frame may still be marked 34759352fb7aSAdrian Chadd * as ATH_BUF_BUSY, unmark it here before 34769352fb7aSAdrian Chadd * finishing the frame processing. 34779352fb7aSAdrian Chadd * Since we've completed a frame (aggregate 34789352fb7aSAdrian Chadd * or otherwise), the hardware has moved on 34799352fb7aSAdrian Chadd * and is no longer referencing the previous 34809352fb7aSAdrian Chadd * descriptor. 34819352fb7aSAdrian Chadd */ 34829352fb7aSAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 3483af33d486SAdrian Chadd last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3484af33d486SAdrian Chadd if (last != NULL) 3485af33d486SAdrian Chadd last->bf_flags &= ~ATH_BUF_BUSY; 34869352fb7aSAdrian Chadd last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 34879352fb7aSAdrian Chadd if (last != NULL) 34889352fb7aSAdrian Chadd last->bf_flags &= ~ATH_BUF_BUSY; 34899352fb7aSAdrian Chadd } 34909352fb7aSAdrian Chadd 349168e8e04eSSam Leffler /* 3492c42a7b7eSSam Leffler * Process completed xmit descriptors from the specified queue. 3493eb6f0de0SAdrian Chadd * Kick the packet scheduler if needed. This can occur from this 3494eb6f0de0SAdrian Chadd * particular task. 
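 *
 * The return value is the number of acknowledged unicast frames;
 * callers use a non-zero count to refresh sc_lastrx (working around
 * phantom beacon-miss interrupts), roughly:
 *
 *	if (ath_tx_processq(sc, &sc->sc_txq[0], 1))
 *		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);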
3495c42a7b7eSSam Leffler */ 3496d7736e13SSam Leffler static int 349796ff485dSAdrian Chadd ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 34985591b213SSam Leffler { 34995591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 35009352fb7aSAdrian Chadd struct ath_buf *bf; 35016edf1dc7SAdrian Chadd struct ath_desc *ds; 350265f9edeeSSam Leffler struct ath_tx_status *ts; 35035591b213SSam Leffler struct ieee80211_node *ni; 3504eb6f0de0SAdrian Chadd struct ath_node *an; 350553e98d5aSAdrian Chadd #ifdef IEEE80211_SUPPORT_SUPERG 350643faa6b2SAdrian Chadd struct ieee80211com *ic = sc->sc_ifp->if_l2com; 350753e98d5aSAdrian Chadd #endif /* IEEE80211_SUPPORT_SUPERG */ 35089352fb7aSAdrian Chadd int nacked; 35095591b213SSam Leffler HAL_STATUS status; 35105591b213SSam Leffler 3511c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3512c42a7b7eSSam Leffler __func__, txq->axq_qnum, 3513c42a7b7eSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3514c42a7b7eSSam Leffler txq->axq_link); 3515d7736e13SSam Leffler nacked = 0; 35165591b213SSam Leffler for (;;) { 3517c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 3518c42a7b7eSSam Leffler txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 35196b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 35205591b213SSam Leffler if (bf == NULL) { 3521c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 35225591b213SSam Leffler break; 35235591b213SSam Leffler } 35246edf1dc7SAdrian Chadd ds = bf->bf_lastds; /* XXX must be setup correctly! */ 352565f9edeeSSam Leffler ts = &bf->bf_status.ds_txstat; 352665f9edeeSSam Leffler status = ath_hal_txprocdesc(ah, ds, ts); 3527a585a9a1SSam Leffler #ifdef ATH_DEBUG 3528c42a7b7eSSam Leffler if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 35296902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 35306902009eSSam Leffler status == HAL_OK); 3531d6b20023SAdrian Chadd else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 3532d6b20023SAdrian Chadd ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3533d6b20023SAdrian Chadd status == HAL_OK); 3534d6b20023SAdrian Chadd } 35355591b213SSam Leffler #endif 35365591b213SSam Leffler if (status == HAL_EINPROGRESS) { 3537c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 35385591b213SSam Leffler break; 35395591b213SSam Leffler } 35406b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 3541584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 354210ad9a77SSam Leffler if (txq->axq_depth > 0) { 354310ad9a77SSam Leffler /* 354410ad9a77SSam Leffler * More frames follow. Mark the buffer busy 354510ad9a77SSam Leffler * so it's not re-used while the hardware may 354610ad9a77SSam Leffler * still re-read the link field in the descriptor. 35476edf1dc7SAdrian Chadd * 35486edf1dc7SAdrian Chadd * Use the last buffer in an aggregate as that 35496edf1dc7SAdrian Chadd * is where the hardware may be - intermediate 35506edf1dc7SAdrian Chadd * descriptors won't be "busy". 
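 * Hence only bf->bf_last is tagged ATH_BUF_BUSY below; the flag is
 * cleared again via ath_tx_update_busy() once a later completion
 * shows the hardware has moved past this descriptor chain.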
355110ad9a77SSam Leffler */ 35526edf1dc7SAdrian Chadd bf->bf_last->bf_flags |= ATH_BUF_BUSY; 355310ad9a77SSam Leffler } else 355410ad9a77SSam Leffler #else 3555ebecf802SSam Leffler if (txq->axq_depth == 0) 355610ad9a77SSam Leffler #endif 35571539af1eSSam Leffler txq->axq_link = NULL; 35586edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 35596edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 35605591b213SSam Leffler 35615591b213SSam Leffler ni = bf->bf_node; 3562c42a7b7eSSam Leffler /* 35639352fb7aSAdrian Chadd * If unicast frame was ack'd update RSSI, 356484784be1SSam Leffler * including the last rx time used to 356584784be1SSam Leffler * workaround phantom bmiss interrupts. 3566d7736e13SSam Leffler */ 35679352fb7aSAdrian Chadd if (ni != NULL && ts->ts_status == 0 && 3568875a9451SAdrian Chadd ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3569d7736e13SSam Leffler nacked++; 357084784be1SSam Leffler sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 357184784be1SSam Leffler ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 357284784be1SSam Leffler ts->ts_rssi); 357384784be1SSam Leffler } 35749352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 35759352fb7aSAdrian Chadd 35769352fb7aSAdrian Chadd /* If unicast frame, update general statistics */ 35779352fb7aSAdrian Chadd if (ni != NULL) { 3578eb6f0de0SAdrian Chadd an = ATH_NODE(ni); 35799352fb7aSAdrian Chadd /* update statistics */ 35809352fb7aSAdrian Chadd ath_tx_update_stats(sc, ts, bf); 3581d7736e13SSam Leffler } 35829352fb7aSAdrian Chadd 35830a915fadSSam Leffler /* 35849352fb7aSAdrian Chadd * Call the completion handler. 35859352fb7aSAdrian Chadd * The completion handler is responsible for 35869352fb7aSAdrian Chadd * calling the rate control code. 35879352fb7aSAdrian Chadd * 35889352fb7aSAdrian Chadd * Frames with no completion handler get the 35899352fb7aSAdrian Chadd * rate control code called here. 359068e8e04eSSam Leffler */ 35919352fb7aSAdrian Chadd if (bf->bf_comp == NULL) { 35929352fb7aSAdrian Chadd if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3593875a9451SAdrian Chadd (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 35949352fb7aSAdrian Chadd /* 35959352fb7aSAdrian Chadd * XXX assume this isn't an aggregate 35969352fb7aSAdrian Chadd * frame. 35979352fb7aSAdrian Chadd */ 3598eb6f0de0SAdrian Chadd ath_tx_update_ratectrl(sc, ni, 3599eb6f0de0SAdrian Chadd bf->bf_state.bfs_rc, ts, 3600eb6f0de0SAdrian Chadd bf->bf_state.bfs_pktlen, 1, 3601eb6f0de0SAdrian Chadd (ts->ts_status == 0 ? 0 : 1)); 36025591b213SSam Leffler } 36039352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 0); 36049352fb7aSAdrian Chadd } else 36059352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 0); 36065591b213SSam Leffler } 3607339ccfb3SSam Leffler #ifdef IEEE80211_SUPPORT_SUPERG 360868e8e04eSSam Leffler /* 360968e8e04eSSam Leffler * Flush fast-frame staging queue when traffic slows. 
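 * ("slows" here means the hardware queue has drained to at most one
 * pending frame).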
361068e8e04eSSam Leffler */ 361168e8e04eSSam Leffler if (txq->axq_depth <= 1) 361204f19fd6SSam Leffler ieee80211_ff_flush(ic, txq->axq_ac); 3613339ccfb3SSam Leffler #endif 3614eb6f0de0SAdrian Chadd 3615eb6f0de0SAdrian Chadd /* Kick the TXQ scheduler */ 3616eb6f0de0SAdrian Chadd if (dosched) { 3617eb6f0de0SAdrian Chadd ATH_TXQ_LOCK(txq); 3618eb6f0de0SAdrian Chadd ath_txq_sched(sc, txq); 3619eb6f0de0SAdrian Chadd ATH_TXQ_UNLOCK(txq); 3620eb6f0de0SAdrian Chadd } 3621eb6f0de0SAdrian Chadd 3622d7736e13SSam Leffler return nacked; 3623d7736e13SSam Leffler } 3624d7736e13SSam Leffler 36258f939e79SAdrian Chadd #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3626c42a7b7eSSam Leffler 3627c42a7b7eSSam Leffler /* 3628c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 3629c42a7b7eSSam Leffler * for a single hardware transmit queue (e.g. 5210 and 5211). 3630c42a7b7eSSam Leffler */ 3631c42a7b7eSSam Leffler static void 3632c42a7b7eSSam Leffler ath_tx_proc_q0(void *arg, int npending) 3633c42a7b7eSSam Leffler { 3634c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3635fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 36368f939e79SAdrian Chadd uint32_t txqs; 3637c42a7b7eSSam Leffler 3638ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3639ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 36408f939e79SAdrian Chadd txqs = sc->sc_txq_active; 36418f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 3642ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 36438f939e79SAdrian Chadd 364496ff485dSAdrian Chadd if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 36458f939e79SAdrian Chadd /* XXX why is lastrx updated in tx code? */ 3646d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 36478f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 364896ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 3649e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 365013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3651e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 36522e986da5SSam Leffler sc->sc_wd_timer = 0; 36535591b213SSam Leffler 36543e50ec2cSSam Leffler if (sc->sc_softled) 365546d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 36563e50ec2cSSam Leffler 3657ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3658ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 3659ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3660ef27340cSAdrian Chadd 366114d33c7eSAdrian Chadd ath_tx_kick(sc); 36625591b213SSam Leffler } 36635591b213SSam Leffler 36645591b213SSam Leffler /* 3665c42a7b7eSSam Leffler * Deferred processing of transmit interrupt; special-cased 3666c42a7b7eSSam Leffler * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 36675591b213SSam Leffler */ 36685591b213SSam Leffler static void 3669c42a7b7eSSam Leffler ath_tx_proc_q0123(void *arg, int npending) 3670c42a7b7eSSam Leffler { 3671c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3672fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 3673d7736e13SSam Leffler int nacked; 36748f939e79SAdrian Chadd uint32_t txqs; 36758f939e79SAdrian Chadd 3676ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3677ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 36788f939e79SAdrian Chadd txqs = sc->sc_txq_active; 36798f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 3680ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3681c42a7b7eSSam Leffler 3682c42a7b7eSSam Leffler /* 3683c42a7b7eSSam Leffler * Process each active queue. 
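 * Only the data queues 0-3 and the CAB queue are checked here, and
 * only those flagged in the txqs snapshot taken above are serviced.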
3684c42a7b7eSSam Leffler */ 3685d7736e13SSam Leffler nacked = 0; 36868f939e79SAdrian Chadd if (TXQACTIVE(txqs, 0)) 368796ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 36888f939e79SAdrian Chadd if (TXQACTIVE(txqs, 1)) 368996ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 36908f939e79SAdrian Chadd if (TXQACTIVE(txqs, 2)) 369196ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 36928f939e79SAdrian Chadd if (TXQACTIVE(txqs, 3)) 369396ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 36948f939e79SAdrian Chadd if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 369596ff485dSAdrian Chadd ath_tx_processq(sc, sc->sc_cabq, 1); 3696d7736e13SSam Leffler if (nacked) 3697d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3698c42a7b7eSSam Leffler 3699e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 370013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3701e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 37022e986da5SSam Leffler sc->sc_wd_timer = 0; 3703c42a7b7eSSam Leffler 37043e50ec2cSSam Leffler if (sc->sc_softled) 370546d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 37063e50ec2cSSam Leffler 3707ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3708ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 3709ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3710ef27340cSAdrian Chadd 371114d33c7eSAdrian Chadd ath_tx_kick(sc); 3712c42a7b7eSSam Leffler } 3713c42a7b7eSSam Leffler 3714c42a7b7eSSam Leffler /* 3715c42a7b7eSSam Leffler * Deferred processing of transmit interrupt. 3716c42a7b7eSSam Leffler */ 3717c42a7b7eSSam Leffler static void 3718c42a7b7eSSam Leffler ath_tx_proc(void *arg, int npending) 3719c42a7b7eSSam Leffler { 3720c42a7b7eSSam Leffler struct ath_softc *sc = arg; 3721fc74a9f9SBrooks Davis struct ifnet *ifp = sc->sc_ifp; 3722d7736e13SSam Leffler int i, nacked; 37238f939e79SAdrian Chadd uint32_t txqs; 37248f939e79SAdrian Chadd 3725ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3726ef27340cSAdrian Chadd sc->sc_txproc_cnt++; 37278f939e79SAdrian Chadd txqs = sc->sc_txq_active; 37288f939e79SAdrian Chadd sc->sc_txq_active &= ~txqs; 3729ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3730c42a7b7eSSam Leffler 3731c42a7b7eSSam Leffler /* 3732c42a7b7eSSam Leffler * Process each active queue. 3733c42a7b7eSSam Leffler */ 3734d7736e13SSam Leffler nacked = 0; 3735c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 37368f939e79SAdrian Chadd if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 373796ff485dSAdrian Chadd nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3738d7736e13SSam Leffler if (nacked) 3739d7736e13SSam Leffler sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3740c42a7b7eSSam Leffler 3741ef27340cSAdrian Chadd /* XXX check this inside of IF_LOCK? */ 3742e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 374313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3744e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 37452e986da5SSam Leffler sc->sc_wd_timer = 0; 3746c42a7b7eSSam Leffler 37473e50ec2cSSam Leffler if (sc->sc_softled) 374846d4d74cSSam Leffler ath_led_event(sc, sc->sc_txrix); 37493e50ec2cSSam Leffler 3750ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 3751ef27340cSAdrian Chadd sc->sc_txproc_cnt--; 3752ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 3753ef27340cSAdrian Chadd 375414d33c7eSAdrian Chadd ath_tx_kick(sc); 3755c42a7b7eSSam Leffler } 375616d4de92SAdrian Chadd #undef TXQACTIVE 3757c42a7b7eSSam Leffler 37589352fb7aSAdrian Chadd /* 375903e9308fSAdrian Chadd * Deferred processing of TXQ rescheduling. 
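 *
 * Each configured hardware queue is walked with its TXQ lock held and
 * handed to ath_txq_sched(); sc_txproc_cnt brackets the walk (under
 * ATH_PCU_LOCK) so a concurrent reset can see that TX processing is
 * still in flight.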
376003e9308fSAdrian Chadd */ 376103e9308fSAdrian Chadd static void 376203e9308fSAdrian Chadd ath_txq_sched_tasklet(void *arg, int npending) 376303e9308fSAdrian Chadd { 376403e9308fSAdrian Chadd struct ath_softc *sc = arg; 376503e9308fSAdrian Chadd int i; 376603e9308fSAdrian Chadd 376703e9308fSAdrian Chadd /* XXX is skipping ok? */ 376803e9308fSAdrian Chadd ATH_PCU_LOCK(sc); 376903e9308fSAdrian Chadd #if 0 377003e9308fSAdrian Chadd if (sc->sc_inreset_cnt > 0) { 377103e9308fSAdrian Chadd device_printf(sc->sc_dev, 377203e9308fSAdrian Chadd "%s: sc_inreset_cnt > 0; skipping\n", __func__); 377303e9308fSAdrian Chadd ATH_PCU_UNLOCK(sc); 377403e9308fSAdrian Chadd return; 377503e9308fSAdrian Chadd } 377603e9308fSAdrian Chadd #endif 377703e9308fSAdrian Chadd sc->sc_txproc_cnt++; 377803e9308fSAdrian Chadd ATH_PCU_UNLOCK(sc); 377903e9308fSAdrian Chadd 378003e9308fSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3781b5a9dfd5SAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 3782b5a9dfd5SAdrian Chadd ATH_TXQ_LOCK(&sc->sc_txq[i]); 378303e9308fSAdrian Chadd ath_txq_sched(sc, &sc->sc_txq[i]); 3784b5a9dfd5SAdrian Chadd ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3785b5a9dfd5SAdrian Chadd } 378603e9308fSAdrian Chadd } 378703e9308fSAdrian Chadd 378803e9308fSAdrian Chadd ATH_PCU_LOCK(sc); 378903e9308fSAdrian Chadd sc->sc_txproc_cnt--; 379003e9308fSAdrian Chadd ATH_PCU_UNLOCK(sc); 379103e9308fSAdrian Chadd } 379203e9308fSAdrian Chadd 3793e1a50456SAdrian Chadd void 3794e1a50456SAdrian Chadd ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3795e1a50456SAdrian Chadd { 3796e1a50456SAdrian Chadd 3797e1a50456SAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 3798e1a50456SAdrian Chadd 3799af33d486SAdrian Chadd if (bf->bf_flags & ATH_BUF_MGMT) 3800af33d486SAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 380123ced6c1SAdrian Chadd else { 3802e1a50456SAdrian Chadd TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 380323ced6c1SAdrian Chadd sc->sc_txbuf_cnt++; 380423ced6c1SAdrian Chadd if (sc->sc_txbuf_cnt > ath_txbuf) { 380523ced6c1SAdrian Chadd device_printf(sc->sc_dev, 380623ced6c1SAdrian Chadd "%s: sc_txbuf_cnt > %d?\n", 380723ced6c1SAdrian Chadd __func__, 380823ced6c1SAdrian Chadd ath_txbuf); 380923ced6c1SAdrian Chadd sc->sc_txbuf_cnt = ath_txbuf; 381023ced6c1SAdrian Chadd } 381123ced6c1SAdrian Chadd } 3812e1a50456SAdrian Chadd } 3813e1a50456SAdrian Chadd 3814e1a50456SAdrian Chadd void 3815e1a50456SAdrian Chadd ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3816e1a50456SAdrian Chadd { 3817e1a50456SAdrian Chadd 3818e1a50456SAdrian Chadd ATH_TXBUF_LOCK_ASSERT(sc); 3819e1a50456SAdrian Chadd 3820af33d486SAdrian Chadd if (bf->bf_flags & ATH_BUF_MGMT) 3821af33d486SAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 382223ced6c1SAdrian Chadd else { 3823e1a50456SAdrian Chadd TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 382423ced6c1SAdrian Chadd sc->sc_txbuf_cnt++; 382523ced6c1SAdrian Chadd if (sc->sc_txbuf_cnt > ATH_TXBUF) { 382623ced6c1SAdrian Chadd device_printf(sc->sc_dev, 382723ced6c1SAdrian Chadd "%s: sc_txbuf_cnt > %d?\n", 382823ced6c1SAdrian Chadd __func__, 382923ced6c1SAdrian Chadd ATH_TXBUF); 383023ced6c1SAdrian Chadd sc->sc_txbuf_cnt = ATH_TXBUF; 383123ced6c1SAdrian Chadd } 383223ced6c1SAdrian Chadd } 3833e1a50456SAdrian Chadd } 3834e1a50456SAdrian Chadd 383503e9308fSAdrian Chadd /* 38369352fb7aSAdrian Chadd * Return a buffer to the pool and update the 'busy' flag on the 38379352fb7aSAdrian Chadd * previous 'tail' entry. 
38389352fb7aSAdrian Chadd * 38399352fb7aSAdrian Chadd * This _must_ only be called when the buffer is involved in a completed 38409352fb7aSAdrian Chadd * TX. The logic is that if it was part of an active TX, the previous 38419352fb7aSAdrian Chadd * buffer on the list is now not involved in a halted TX DMA queue, waiting 38429352fb7aSAdrian Chadd * for restart (eg for TDMA.) 38439352fb7aSAdrian Chadd * 38449352fb7aSAdrian Chadd * The caller must free the mbuf and recycle the node reference. 38459352fb7aSAdrian Chadd */ 38469352fb7aSAdrian Chadd void 38479352fb7aSAdrian Chadd ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 38489352fb7aSAdrian Chadd { 38499352fb7aSAdrian Chadd bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 38509352fb7aSAdrian Chadd bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 38519352fb7aSAdrian Chadd 38529352fb7aSAdrian Chadd KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 38539352fb7aSAdrian Chadd KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 38549352fb7aSAdrian Chadd 38559352fb7aSAdrian Chadd ATH_TXBUF_LOCK(sc); 38569352fb7aSAdrian Chadd ath_tx_update_busy(sc); 3857e1a50456SAdrian Chadd ath_returnbuf_tail(sc, bf); 38589352fb7aSAdrian Chadd ATH_TXBUF_UNLOCK(sc); 38599352fb7aSAdrian Chadd } 38609352fb7aSAdrian Chadd 38619352fb7aSAdrian Chadd /* 38629352fb7aSAdrian Chadd * This is currently used by ath_tx_draintxq() and 38639352fb7aSAdrian Chadd * ath_tx_tid_free_pkts(). 38649352fb7aSAdrian Chadd * 38659352fb7aSAdrian Chadd * It recycles a single ath_buf. 38669352fb7aSAdrian Chadd */ 38679352fb7aSAdrian Chadd void 38689352fb7aSAdrian Chadd ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 38699352fb7aSAdrian Chadd { 38709352fb7aSAdrian Chadd struct ieee80211_node *ni = bf->bf_node; 38719352fb7aSAdrian Chadd struct mbuf *m0 = bf->bf_m; 38729352fb7aSAdrian Chadd 38739352fb7aSAdrian Chadd bf->bf_node = NULL; 38749352fb7aSAdrian Chadd bf->bf_m = NULL; 38759352fb7aSAdrian Chadd 38769352fb7aSAdrian Chadd /* Free the buffer, it's not needed any longer */ 38779352fb7aSAdrian Chadd ath_freebuf(sc, bf); 38789352fb7aSAdrian Chadd 38799352fb7aSAdrian Chadd if (ni != NULL) { 38809352fb7aSAdrian Chadd /* 38819352fb7aSAdrian Chadd * Do any callback and reclaim the node reference. 38829352fb7aSAdrian Chadd */ 38839352fb7aSAdrian Chadd if (m0->m_flags & M_TXCB) 38849352fb7aSAdrian Chadd ieee80211_process_callback(ni, m0, status); 38859352fb7aSAdrian Chadd ieee80211_free_node(ni); 38869352fb7aSAdrian Chadd } 38879352fb7aSAdrian Chadd m_freem(m0); 38889352fb7aSAdrian Chadd 38899352fb7aSAdrian Chadd /* 38909352fb7aSAdrian Chadd * XXX the buffer used to be freed -after-, but the DMA map was 38919352fb7aSAdrian Chadd * freed where ath_freebuf() now is. I've no idea what this 38929352fb7aSAdrian Chadd * will do. 
38939352fb7aSAdrian Chadd */ 38949352fb7aSAdrian Chadd } 38959352fb7aSAdrian Chadd 38969352fb7aSAdrian Chadd void 3897c42a7b7eSSam Leffler ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 38985591b213SSam Leffler { 3899a585a9a1SSam Leffler #ifdef ATH_DEBUG 39005591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 3901d2f6ed15SSam Leffler #endif 39025591b213SSam Leffler struct ath_buf *bf; 39037a4c5ed9SSam Leffler u_int ix; 39045591b213SSam Leffler 3905c42a7b7eSSam Leffler /* 3906c42a7b7eSSam Leffler * NB: this assumes output has been stopped and 39075d61b5e8SSam Leffler * we do not need to block ath_tx_proc 3908c42a7b7eSSam Leffler */ 390910ad9a77SSam Leffler ATH_TXBUF_LOCK(sc); 39106b349e5aSAdrian Chadd bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 391110ad9a77SSam Leffler if (bf != NULL) 391210ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 3913af33d486SAdrian Chadd bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3914af33d486SAdrian Chadd if (bf != NULL) 3915af33d486SAdrian Chadd bf->bf_flags &= ~ATH_BUF_BUSY; 391610ad9a77SSam Leffler ATH_TXBUF_UNLOCK(sc); 39179352fb7aSAdrian Chadd 39187a4c5ed9SSam Leffler for (ix = 0;; ix++) { 3919c42a7b7eSSam Leffler ATH_TXQ_LOCK(txq); 39206b349e5aSAdrian Chadd bf = TAILQ_FIRST(&txq->axq_q); 39215591b213SSam Leffler if (bf == NULL) { 3922ebecf802SSam Leffler txq->axq_link = NULL; 3923c42a7b7eSSam Leffler ATH_TXQ_UNLOCK(txq); 39245591b213SSam Leffler break; 39255591b213SSam Leffler } 39266b349e5aSAdrian Chadd ATH_TXQ_REMOVE(txq, bf, bf_list); 39276edf1dc7SAdrian Chadd if (bf->bf_state.bfs_aggr) 39286edf1dc7SAdrian Chadd txq->axq_aggr_depth--; 3929a585a9a1SSam Leffler #ifdef ATH_DEBUG 39304a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 3931b032f27cSSam Leffler struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3932b032f27cSSam Leffler 39336902009eSSam Leffler ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 39346edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 393565f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 3936e40b6ab1SSam Leffler ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 39374a3ac3fcSSam Leffler bf->bf_m->m_len, 0, -1); 39384a3ac3fcSSam Leffler } 3939a585a9a1SSam Leffler #endif /* ATH_DEBUG */ 394023428eafSSam Leffler /* 39419352fb7aSAdrian Chadd * Since we're now doing magic in the completion 39429352fb7aSAdrian Chadd * functions, we -must- call it for aggregation 39439352fb7aSAdrian Chadd * destinations or BAW tracking will get upset. 394423428eafSSam Leffler */ 39459352fb7aSAdrian Chadd /* 39469352fb7aSAdrian Chadd * Clear ATH_BUF_BUSY; the completion handler 39479352fb7aSAdrian Chadd * will free the buffer. 39489352fb7aSAdrian Chadd */ 39499352fb7aSAdrian Chadd ATH_TXQ_UNLOCK(txq); 395010ad9a77SSam Leffler bf->bf_flags &= ~ATH_BUF_BUSY; 39519352fb7aSAdrian Chadd if (bf->bf_comp) 39529352fb7aSAdrian Chadd bf->bf_comp(sc, bf, 1); 39539352fb7aSAdrian Chadd else 39549352fb7aSAdrian Chadd ath_tx_default_comp(sc, bf, 1); 39555591b213SSam Leffler } 39569352fb7aSAdrian Chadd 3957eb6f0de0SAdrian Chadd /* 3958eb6f0de0SAdrian Chadd * Drain software queued frames which are on 3959eb6f0de0SAdrian Chadd * active TIDs. 
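 * (The hardware queue itself was emptied above; the per-TID software
 * queues are handled by ath_tx_txq_drain().)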
3960eb6f0de0SAdrian Chadd */ 3961eb6f0de0SAdrian Chadd ath_tx_txq_drain(sc, txq); 3962c42a7b7eSSam Leffler } 3963c42a7b7eSSam Leffler 3964c42a7b7eSSam Leffler static void 3965c42a7b7eSSam Leffler ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 3966c42a7b7eSSam Leffler { 3967c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3968c42a7b7eSSam Leffler 3969c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 3970c42a7b7eSSam Leffler __func__, txq->axq_qnum, 39716891c875SPeter Wemm (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 39726891c875SPeter Wemm txq->axq_link); 39734a3ac3fcSSam Leffler (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 3974c42a7b7eSSam Leffler } 3975c42a7b7eSSam Leffler 39762d433424SAdrian Chadd static int 39772d433424SAdrian Chadd ath_stoptxdma(struct ath_softc *sc) 3978c42a7b7eSSam Leffler { 3979c42a7b7eSSam Leffler struct ath_hal *ah = sc->sc_ah; 3980c42a7b7eSSam Leffler int i; 3981c42a7b7eSSam Leffler 3982c42a7b7eSSam Leffler /* XXX return value */ 39832d433424SAdrian Chadd if (sc->sc_invalid) 39842d433424SAdrian Chadd return 0; 39852d433424SAdrian Chadd 3986c42a7b7eSSam Leffler if (!sc->sc_invalid) { 3987c42a7b7eSSam Leffler /* don't touch the hardware if marked invalid */ 39884a3ac3fcSSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 39894a3ac3fcSSam Leffler __func__, sc->sc_bhalq, 39904a3ac3fcSSam Leffler (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 39914a3ac3fcSSam Leffler NULL); 3992c42a7b7eSSam Leffler (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 3993c42a7b7eSSam Leffler for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3994c42a7b7eSSam Leffler if (ATH_TXQ_SETUP(sc, i)) 3995c42a7b7eSSam Leffler ath_tx_stopdma(sc, &sc->sc_txq[i]); 3996c42a7b7eSSam Leffler } 39972d433424SAdrian Chadd 39982d433424SAdrian Chadd return 1; 39992d433424SAdrian Chadd } 40002d433424SAdrian Chadd 40012d433424SAdrian Chadd /* 40022d433424SAdrian Chadd * Drain the transmit queues and reclaim resources. 40032d433424SAdrian Chadd */ 40042d433424SAdrian Chadd static void 40052d433424SAdrian Chadd ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 40062d433424SAdrian Chadd { 40072d433424SAdrian Chadd #ifdef ATH_DEBUG 40082d433424SAdrian Chadd struct ath_hal *ah = sc->sc_ah; 40092d433424SAdrian Chadd #endif 40102d433424SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 40112d433424SAdrian Chadd int i; 40122d433424SAdrian Chadd 40132d433424SAdrian Chadd (void) ath_stoptxdma(sc); 40142d433424SAdrian Chadd 4015ef27340cSAdrian Chadd for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4016ef27340cSAdrian Chadd /* 4017ef27340cSAdrian Chadd * XXX TODO: should we just handle the completed TX frames 4018ef27340cSAdrian Chadd * here, whether or not the reset is a full one or not? 
4019ef27340cSAdrian Chadd */ 4020ef27340cSAdrian Chadd if (ATH_TXQ_SETUP(sc, i)) { 4021ef27340cSAdrian Chadd if (reset_type == ATH_RESET_NOLOSS) 4022ef27340cSAdrian Chadd ath_tx_processq(sc, &sc->sc_txq[i], 0); 4023ef27340cSAdrian Chadd else 4024c42a7b7eSSam Leffler ath_tx_draintxq(sc, &sc->sc_txq[i]); 4025ef27340cSAdrian Chadd } 4026ef27340cSAdrian Chadd } 40274a3ac3fcSSam Leffler #ifdef ATH_DEBUG 40284a3ac3fcSSam Leffler if (sc->sc_debug & ATH_DEBUG_RESET) { 40296b349e5aSAdrian Chadd struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 40304a3ac3fcSSam Leffler if (bf != NULL && bf->bf_m != NULL) { 40316902009eSSam Leffler ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 40326edf1dc7SAdrian Chadd ath_hal_txprocdesc(ah, bf->bf_lastds, 403365f9edeeSSam Leffler &bf->bf_status.ds_txstat) == HAL_OK); 4034e40b6ab1SSam Leffler ieee80211_dump_pkt(ifp->if_l2com, 4035e40b6ab1SSam Leffler mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 4036e40b6ab1SSam Leffler 0, -1); 40374a3ac3fcSSam Leffler } 40384a3ac3fcSSam Leffler } 40394a3ac3fcSSam Leffler #endif /* ATH_DEBUG */ 4040e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 404113f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4042e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 40432e986da5SSam Leffler sc->sc_wd_timer = 0; 40445591b213SSam Leffler } 40455591b213SSam Leffler 40465591b213SSam Leffler /* 4047c42a7b7eSSam Leffler * Update internal state after a channel change. 4048c42a7b7eSSam Leffler */ 4049c42a7b7eSSam Leffler static void 4050c42a7b7eSSam Leffler ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 4051c42a7b7eSSam Leffler { 4052c42a7b7eSSam Leffler enum ieee80211_phymode mode; 4053c42a7b7eSSam Leffler 4054c42a7b7eSSam Leffler /* 4055c42a7b7eSSam Leffler * Change channels and update the h/w rate map 4056c42a7b7eSSam Leffler * if we're switching; e.g. 11a to 11b/g. 4057c42a7b7eSSam Leffler */ 405868e8e04eSSam Leffler mode = ieee80211_chan2mode(chan); 4059c42a7b7eSSam Leffler if (mode != sc->sc_curmode) 4060c42a7b7eSSam Leffler ath_setcurmode(sc, mode); 406159efa8b5SSam Leffler sc->sc_curchan = chan; 4062c42a7b7eSSam Leffler } 4063c42a7b7eSSam Leffler 4064c42a7b7eSSam Leffler /* 40655591b213SSam Leffler * Set/change channels. If the channel is really being changed, 40664fa8d4efSDaniel Eischen * it's done by resetting the chip. To accomplish this we must 40675591b213SSam Leffler * first cleanup any pending DMA, then restart stuff after a la 40685591b213SSam Leffler * ath_init. 
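 *
 * In outline, the code below: blocks the taskqueue and new interrupts,
 * stops pending RX/TX completion, drains the receive and transmit
 * paths (ATH_RESET_NOLOSS first, then ATH_RESET_FULL), resets the
 * hardware on the new channel via ath_hal_reset(), arms DFS radar
 * detection if needed, restarts the receive path, updates the rate
 * map through ath_chan_change(), restores beacon timers if they were
 * running, and finally re-enables interrupts and restarts TX/RX.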
40695591b213SSam Leffler */ 40705591b213SSam Leffler static int 40715591b213SSam Leffler ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 40725591b213SSam Leffler { 4073b032f27cSSam Leffler struct ifnet *ifp = sc->sc_ifp; 4074b032f27cSSam Leffler struct ieee80211com *ic = ifp->if_l2com; 40755591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 4076ef27340cSAdrian Chadd int ret = 0; 4077ef27340cSAdrian Chadd 4078ef27340cSAdrian Chadd /* Treat this as an interface reset */ 4079d52f7132SAdrian Chadd ATH_PCU_UNLOCK_ASSERT(sc); 4080d52f7132SAdrian Chadd ATH_UNLOCK_ASSERT(sc); 4081d52f7132SAdrian Chadd 4082d52f7132SAdrian Chadd /* (Try to) stop TX/RX from occuring */ 4083d52f7132SAdrian Chadd taskqueue_block(sc->sc_tq); 4084d52f7132SAdrian Chadd 4085ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4086e78719adSAdrian Chadd ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 4087e78719adSAdrian Chadd ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 4088ee321975SAdrian Chadd if (ath_reset_grablock(sc, 1) == 0) { 4089ee321975SAdrian Chadd device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 4090ef27340cSAdrian Chadd __func__); 4091ee321975SAdrian Chadd } 4092ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4093c42a7b7eSSam Leffler 409459efa8b5SSam Leffler DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 409559efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 409659efa8b5SSam Leffler chan->ic_freq, chan->ic_flags); 409759efa8b5SSam Leffler if (chan != sc->sc_curchan) { 4098c42a7b7eSSam Leffler HAL_STATUS status; 40995591b213SSam Leffler /* 41005591b213SSam Leffler * To switch channels clear any pending DMA operations; 41015591b213SSam Leffler * wait long enough for the RX fifo to drain, reset the 41025591b213SSam Leffler * hardware at the new frequency, and then re-enable 41035591b213SSam Leffler * the relevant bits of the h/w. 41045591b213SSam Leffler */ 4105ef27340cSAdrian Chadd #if 0 41065591b213SSam Leffler ath_hal_intrset(ah, 0); /* disable interrupts */ 4107ef27340cSAdrian Chadd #endif 41089a842e8bSAdrian Chadd ath_stoprecv(sc, 1); /* turn off frame recv */ 41099a842e8bSAdrian Chadd /* 41109a842e8bSAdrian Chadd * First, handle completed TX/RX frames. 41119a842e8bSAdrian Chadd */ 4112f8cc9b09SAdrian Chadd ath_rx_flush(sc); 41139a842e8bSAdrian Chadd ath_draintxq(sc, ATH_RESET_NOLOSS); 41149a842e8bSAdrian Chadd /* 41159a842e8bSAdrian Chadd * Next, flush the non-scheduled frames. 41169a842e8bSAdrian Chadd */ 4117517526efSAdrian Chadd ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 41189a842e8bSAdrian Chadd 411959efa8b5SSam Leffler if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 4120b032f27cSSam Leffler if_printf(ifp, "%s: unable to reset " 412179649302SGavin Atkinson "channel %u (%u MHz, flags 0x%x), hal status %u\n", 412259efa8b5SSam Leffler __func__, ieee80211_chan2ieee(ic, chan), 412359efa8b5SSam Leffler chan->ic_freq, chan->ic_flags, status); 4124ef27340cSAdrian Chadd ret = EIO; 4125ef27340cSAdrian Chadd goto finish; 41265591b213SSam Leffler } 4127c59005e9SSam Leffler sc->sc_diversity = ath_hal_getdiversity(ah); 4128c42a7b7eSSam Leffler 412948237774SAdrian Chadd /* Let DFS at it in case it's a DFS channel */ 4130398bca2eSAdrian Chadd ath_dfs_radar_enable(sc, chan); 413148237774SAdrian Chadd 41325591b213SSam Leffler /* 41335591b213SSam Leffler * Re-enable rx framework. 
41345591b213SSam Leffler */ 41355591b213SSam Leffler if (ath_startrecv(sc) != 0) { 4136b032f27cSSam Leffler if_printf(ifp, "%s: unable to restart recv logic\n", 4137b032f27cSSam Leffler __func__); 4138ef27340cSAdrian Chadd ret = EIO; 4139ef27340cSAdrian Chadd goto finish; 41405591b213SSam Leffler } 41415591b213SSam Leffler 41425591b213SSam Leffler /* 41435591b213SSam Leffler * Change channels and update the h/w rate map 41445591b213SSam Leffler * if we're switching; e.g. 11a to 11b/g. 41455591b213SSam Leffler */ 4146c42a7b7eSSam Leffler ath_chan_change(sc, chan); 41470a915fadSSam Leffler 41480a915fadSSam Leffler /* 41492fd9aabbSAdrian Chadd * Reset clears the beacon timers; reset them 41502fd9aabbSAdrian Chadd * here if needed. 41512fd9aabbSAdrian Chadd */ 41522fd9aabbSAdrian Chadd if (sc->sc_beacons) { /* restart beacons */ 41532fd9aabbSAdrian Chadd #ifdef IEEE80211_SUPPORT_TDMA 41542fd9aabbSAdrian Chadd if (sc->sc_tdma) 41552fd9aabbSAdrian Chadd ath_tdma_config(sc, NULL); 41562fd9aabbSAdrian Chadd else 41572fd9aabbSAdrian Chadd #endif 41582fd9aabbSAdrian Chadd ath_beacon_config(sc, NULL); 41592fd9aabbSAdrian Chadd } 41602fd9aabbSAdrian Chadd 41612fd9aabbSAdrian Chadd /* 41620a915fadSSam Leffler * Re-enable interrupts. 41630a915fadSSam Leffler */ 4164e78719adSAdrian Chadd #if 0 41650a915fadSSam Leffler ath_hal_intrset(ah, sc->sc_imask); 4166ef27340cSAdrian Chadd #endif 41675591b213SSam Leffler } 4168ef27340cSAdrian Chadd 4169ef27340cSAdrian Chadd finish: 4170ef27340cSAdrian Chadd ATH_PCU_LOCK(sc); 4171ef27340cSAdrian Chadd sc->sc_inreset_cnt--; 4172ef27340cSAdrian Chadd /* XXX only do this if sc_inreset_cnt == 0? */ 4173ef27340cSAdrian Chadd ath_hal_intrset(ah, sc->sc_imask); 4174ef27340cSAdrian Chadd ATH_PCU_UNLOCK(sc); 4175ef27340cSAdrian Chadd 4176e4e7938aSAdrian Chadd IF_LOCK(&ifp->if_snd); 4177ef27340cSAdrian Chadd ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4178e4e7938aSAdrian Chadd IF_UNLOCK(&ifp->if_snd); 4179ef27340cSAdrian Chadd ath_txrx_start(sc); 4180ef27340cSAdrian Chadd /* XXX ath_start? */ 4181ef27340cSAdrian Chadd 4182ef27340cSAdrian Chadd return ret; 41835591b213SSam Leffler } 41845591b213SSam Leffler 41855591b213SSam Leffler /* 41865591b213SSam Leffler * Periodically recalibrate the PHY to account 41875591b213SSam Leffler * for temperature/environment changes. 
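 *
 * Three kinds of work are interleaved here, each on its own interval:
 * ANI polling (ath_anicalinterval, in ms), short calibration
 * (ath_shortcalinterval, in ms, only while sc_doresetcal is set) and
 * long calibration (ath_longcalinterval, in seconds).  The callout is
 * re-armed for the sooner of the next short/long calibration and the
 * next ANI poll.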
41885591b213SSam Leffler */ 41895591b213SSam Leffler static void 41905591b213SSam Leffler ath_calibrate(void *arg) 41915591b213SSam Leffler { 41925591b213SSam Leffler struct ath_softc *sc = arg; 41935591b213SSam Leffler struct ath_hal *ah = sc->sc_ah; 41942dc7fcc4SSam Leffler struct ifnet *ifp = sc->sc_ifp; 41958d91de92SSam Leffler struct ieee80211com *ic = ifp->if_l2com; 41962dc7fcc4SSam Leffler HAL_BOOL longCal, isCalDone; 4197a108ab63SAdrian Chadd HAL_BOOL aniCal, shortCal = AH_FALSE; 41982dc7fcc4SSam Leffler int nextcal; 41995591b213SSam Leffler 42008d91de92SSam Leffler if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 42018d91de92SSam Leffler goto restart; 42022dc7fcc4SSam Leffler longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4203a108ab63SAdrian Chadd aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4204a108ab63SAdrian Chadd if (sc->sc_doresetcal) 4205a108ab63SAdrian Chadd shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4206a108ab63SAdrian Chadd 4207a108ab63SAdrian Chadd DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4208a108ab63SAdrian Chadd if (aniCal) { 4209a108ab63SAdrian Chadd sc->sc_stats.ast_ani_cal++; 4210a108ab63SAdrian Chadd sc->sc_lastani = ticks; 4211a108ab63SAdrian Chadd ath_hal_ani_poll(ah, sc->sc_curchan); 4212a108ab63SAdrian Chadd } 4213a108ab63SAdrian Chadd 42142dc7fcc4SSam Leffler if (longCal) { 42155591b213SSam Leffler sc->sc_stats.ast_per_cal++; 42168197f57eSAdrian Chadd sc->sc_lastlongcal = ticks; 42175591b213SSam Leffler if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 42185591b213SSam Leffler /* 42195591b213SSam Leffler * Rfgain is out of bounds, reset the chip 42205591b213SSam Leffler * to load new gain values. 42215591b213SSam Leffler */ 4222370572d9SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4223370572d9SSam Leffler "%s: rfgain change\n", __func__); 42245591b213SSam Leffler sc->sc_stats.ast_per_rfgain++; 4225ef27340cSAdrian Chadd sc->sc_resetcal = 0; 4226ef27340cSAdrian Chadd sc->sc_doresetcal = AH_TRUE; 4227d52f7132SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4228d52f7132SAdrian Chadd callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4229ef27340cSAdrian Chadd return; 42305591b213SSam Leffler } 42312dc7fcc4SSam Leffler /* 42322dc7fcc4SSam Leffler * If this long cal is after an idle period, then 42332dc7fcc4SSam Leffler * reset the data collection state so we start fresh. 42342dc7fcc4SSam Leffler */ 42352dc7fcc4SSam Leffler if (sc->sc_resetcal) { 423659efa8b5SSam Leffler (void) ath_hal_calreset(ah, sc->sc_curchan); 42372dc7fcc4SSam Leffler sc->sc_lastcalreset = ticks; 4238a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 42392dc7fcc4SSam Leffler sc->sc_resetcal = 0; 4240a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 42412dc7fcc4SSam Leffler } 42422dc7fcc4SSam Leffler } 4243a108ab63SAdrian Chadd 4244a108ab63SAdrian Chadd /* Only call if we're doing a short/long cal, not for ANI calibration */ 4245a108ab63SAdrian Chadd if (shortCal || longCal) { 424659efa8b5SSam Leffler if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 42472dc7fcc4SSam Leffler if (longCal) { 42482dc7fcc4SSam Leffler /* 42492dc7fcc4SSam Leffler * Calibrate noise floor data again in case of change. 
42502dc7fcc4SSam Leffler */ 42512dc7fcc4SSam Leffler ath_hal_process_noisefloor(ah); 42522dc7fcc4SSam Leffler } 42532dc7fcc4SSam Leffler } else { 4254c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_ANY, 4255c42a7b7eSSam Leffler "%s: calibration of channel %u failed\n", 425659efa8b5SSam Leffler __func__, sc->sc_curchan->ic_freq); 42575591b213SSam Leffler sc->sc_stats.ast_per_calfail++; 42585591b213SSam Leffler } 4259a108ab63SAdrian Chadd if (shortCal) 4260a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 4261a108ab63SAdrian Chadd } 42622dc7fcc4SSam Leffler if (!isCalDone) { 42638d91de92SSam Leffler restart: 42647b0c77ecSSam Leffler /* 42652dc7fcc4SSam Leffler * Use a shorter interval to potentially collect multiple 42662dc7fcc4SSam Leffler * data samples required to complete calibration. Once 42672dc7fcc4SSam Leffler * we're told the work is done we drop back to a longer 42682dc7fcc4SSam Leffler * interval between requests. We're more aggressive doing 42692dc7fcc4SSam Leffler * work when operating as an AP to improve operation right 42702dc7fcc4SSam Leffler * after startup. 42717b0c77ecSSam Leffler */ 4272a108ab63SAdrian Chadd sc->sc_lastshortcal = ticks; 4273a108ab63SAdrian Chadd nextcal = ath_shortcalinterval*hz/1000; 42742dc7fcc4SSam Leffler if (sc->sc_opmode != HAL_M_HOSTAP) 42752dc7fcc4SSam Leffler nextcal *= 10; 4276a108ab63SAdrian Chadd sc->sc_doresetcal = AH_TRUE; 42772dc7fcc4SSam Leffler } else { 4278a108ab63SAdrian Chadd /* nextcal should be the shortest time for next event */ 42792dc7fcc4SSam Leffler nextcal = ath_longcalinterval*hz; 42802dc7fcc4SSam Leffler if (sc->sc_lastcalreset == 0) 42812dc7fcc4SSam Leffler sc->sc_lastcalreset = sc->sc_lastlongcal; 42822dc7fcc4SSam Leffler else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 42832dc7fcc4SSam Leffler sc->sc_resetcal = 1; /* setup reset next trip */ 4284a108ab63SAdrian Chadd sc->sc_doresetcal = AH_FALSE; 4285bd5a9920SSam Leffler } 4286a108ab63SAdrian Chadd /* ANI calibration may occur more often than short/long/resetcal */ 4287a108ab63SAdrian Chadd if (ath_anicalinterval > 0) 4288a108ab63SAdrian Chadd nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4289bd5a9920SSam Leffler 42902dc7fcc4SSam Leffler if (nextcal != 0) { 42912dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 42922dc7fcc4SSam Leffler __func__, nextcal, isCalDone ? "" : "!"); 42932dc7fcc4SSam Leffler callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 42942dc7fcc4SSam Leffler } else { 42952dc7fcc4SSam Leffler DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 42962dc7fcc4SSam Leffler __func__); 42972dc7fcc4SSam Leffler /* NB: don't rearm timer */ 42982dc7fcc4SSam Leffler } 42995591b213SSam Leffler } 43005591b213SSam Leffler 430168e8e04eSSam Leffler static void 430268e8e04eSSam Leffler ath_scan_start(struct ieee80211com *ic) 430368e8e04eSSam Leffler { 430468e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 430568e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 430668e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 430768e8e04eSSam Leffler u_int32_t rfilt; 430868e8e04eSSam Leffler 430968e8e04eSSam Leffler /* XXX calibration timer? 
*/ 431068e8e04eSSam Leffler 4311c98cefc5SAdrian Chadd ATH_LOCK(sc); 431268e8e04eSSam Leffler sc->sc_scanning = 1; 431368e8e04eSSam Leffler sc->sc_syncbeacon = 0; 431468e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 4315c98cefc5SAdrian Chadd ATH_UNLOCK(sc); 4316c98cefc5SAdrian Chadd 4317c98cefc5SAdrian Chadd ATH_PCU_LOCK(sc); 431868e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 431968e8e04eSSam Leffler ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 4320c98cefc5SAdrian Chadd ATH_PCU_UNLOCK(sc); 432168e8e04eSSam Leffler 432268e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 432368e8e04eSSam Leffler __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 432468e8e04eSSam Leffler } 432568e8e04eSSam Leffler 432668e8e04eSSam Leffler static void 432768e8e04eSSam Leffler ath_scan_end(struct ieee80211com *ic) 432868e8e04eSSam Leffler { 432968e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 433068e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 433168e8e04eSSam Leffler struct ath_hal *ah = sc->sc_ah; 433268e8e04eSSam Leffler u_int32_t rfilt; 433368e8e04eSSam Leffler 4334c98cefc5SAdrian Chadd ATH_LOCK(sc); 433568e8e04eSSam Leffler sc->sc_scanning = 0; 433668e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 4337c98cefc5SAdrian Chadd ATH_UNLOCK(sc); 4338c98cefc5SAdrian Chadd 4339c98cefc5SAdrian Chadd ATH_PCU_LOCK(sc); 434068e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 434168e8e04eSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 434268e8e04eSSam Leffler 434368e8e04eSSam Leffler ath_hal_process_noisefloor(ah); 4344c98cefc5SAdrian Chadd ATH_PCU_UNLOCK(sc); 434568e8e04eSSam Leffler 434668e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 434768e8e04eSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), 434868e8e04eSSam Leffler sc->sc_curaid); 434968e8e04eSSam Leffler } 435068e8e04eSSam Leffler 4351fdd72b4aSAdrian Chadd #ifdef ATH_ENABLE_11N 4352e7200579SAdrian Chadd /* 4353e7200579SAdrian Chadd * For now, just do a channel change. 4354e7200579SAdrian Chadd * 4355e7200579SAdrian Chadd * Later, we'll go through the hard slog of suspending tx/rx, changing rate 4356e7200579SAdrian Chadd * control state and resetting the hardware without dropping frames out 4357e7200579SAdrian Chadd * of the queue. 4358e7200579SAdrian Chadd * 4359e7200579SAdrian Chadd * The unfortunate trouble here is making absolutely sure that the 4360e7200579SAdrian Chadd * channel width change has propagated enough so the hardware 4361e7200579SAdrian Chadd * absolutely isn't handed bogus frames for it's current operating 4362e7200579SAdrian Chadd * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 4363e7200579SAdrian Chadd * does occur in parallel, we need to make certain we've blocked 4364e7200579SAdrian Chadd * any further ongoing TX (and RX, that can cause raw TX) 4365e7200579SAdrian Chadd * before we do this. 
4366e7200579SAdrian Chadd */ 4367e7200579SAdrian Chadd static void 4368e7200579SAdrian Chadd ath_update_chw(struct ieee80211com *ic) 4369e7200579SAdrian Chadd { 4370e7200579SAdrian Chadd struct ifnet *ifp = ic->ic_ifp; 4371e7200579SAdrian Chadd struct ath_softc *sc = ifp->if_softc; 4372e7200579SAdrian Chadd 4373e7200579SAdrian Chadd DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 4374e7200579SAdrian Chadd ath_set_channel(ic); 4375e7200579SAdrian Chadd } 4376fdd72b4aSAdrian Chadd #endif /* ATH_ENABLE_11N */ 4377e7200579SAdrian Chadd 437868e8e04eSSam Leffler static void 437968e8e04eSSam Leffler ath_set_channel(struct ieee80211com *ic) 438068e8e04eSSam Leffler { 438168e8e04eSSam Leffler struct ifnet *ifp = ic->ic_ifp; 438268e8e04eSSam Leffler struct ath_softc *sc = ifp->if_softc; 438368e8e04eSSam Leffler 438468e8e04eSSam Leffler (void) ath_chan_set(sc, ic->ic_curchan); 438568e8e04eSSam Leffler /* 438668e8e04eSSam Leffler * If we are returning to our bss channel then mark state 438768e8e04eSSam Leffler * so the next recv'd beacon's tsf will be used to sync the 438868e8e04eSSam Leffler * beacon timers. Note that since we only hear beacons in 438968e8e04eSSam Leffler * sta/ibss mode this has no effect in other operating modes. 439068e8e04eSSam Leffler */ 4391a887b1e3SAdrian Chadd ATH_LOCK(sc); 439268e8e04eSSam Leffler if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 439368e8e04eSSam Leffler sc->sc_syncbeacon = 1; 4394a887b1e3SAdrian Chadd ATH_UNLOCK(sc); 439568e8e04eSSam Leffler } 439668e8e04eSSam Leffler 4397b032f27cSSam Leffler /* 4398b032f27cSSam Leffler * Walk the vap list and check if there any vap's in RUN state. 4399b032f27cSSam Leffler */ 44005591b213SSam Leffler static int 4401b032f27cSSam Leffler ath_isanyrunningvaps(struct ieee80211vap *this) 44025591b213SSam Leffler { 4403b032f27cSSam Leffler struct ieee80211com *ic = this->iv_ic; 4404b032f27cSSam Leffler struct ieee80211vap *vap; 4405b032f27cSSam Leffler 4406b032f27cSSam Leffler IEEE80211_LOCK_ASSERT(ic); 4407b032f27cSSam Leffler 4408b032f27cSSam Leffler TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4409309a3e45SSam Leffler if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4410b032f27cSSam Leffler return 1; 4411b032f27cSSam Leffler } 4412b032f27cSSam Leffler return 0; 4413b032f27cSSam Leffler } 4414b032f27cSSam Leffler 4415b032f27cSSam Leffler static int 4416b032f27cSSam Leffler ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4417b032f27cSSam Leffler { 4418b032f27cSSam Leffler struct ieee80211com *ic = vap->iv_ic; 4419b032f27cSSam Leffler struct ath_softc *sc = ic->ic_ifp->if_softc; 4420b032f27cSSam Leffler struct ath_vap *avp = ATH_VAP(vap); 442145bbf62fSSam Leffler struct ath_hal *ah = sc->sc_ah; 4422b032f27cSSam Leffler struct ieee80211_node *ni = NULL; 442368e8e04eSSam Leffler int i, error, stamode; 44245591b213SSam Leffler u_int32_t rfilt; 4425f52efb6dSAdrian Chadd int csa_run_transition = 0; 44265591b213SSam Leffler static const HAL_LED_STATE leds[] = { 44275591b213SSam Leffler HAL_LED_INIT, /* IEEE80211_S_INIT */ 44285591b213SSam Leffler HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 44295591b213SSam Leffler HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 44305591b213SSam Leffler HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 443177d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CAC */ 44325591b213SSam Leffler HAL_LED_RUN, /* IEEE80211_S_RUN */ 443377d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_CSA */ 443477d5e068SSam Leffler HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 44355591b213SSam Leffler }; 44365591b213SSam 
Leffler 4437c42a7b7eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4438b032f27cSSam Leffler ieee80211_state_name[vap->iv_state], 4439c42a7b7eSSam Leffler ieee80211_state_name[nstate]); 44405591b213SSam Leffler 4441107fdf96SAdrian Chadd /* 4442107fdf96SAdrian Chadd * net80211 _should_ have the comlock asserted at this point. 4443107fdf96SAdrian Chadd * There are some comments around the calls to vap->iv_newstate 4444107fdf96SAdrian Chadd * which indicate that it (newstate) may end up dropping the 4445107fdf96SAdrian Chadd * lock. This and the subsequent lock assert check after newstate 4446107fdf96SAdrian Chadd * are an attempt to catch these and figure out how/why. 4447107fdf96SAdrian Chadd */ 4448107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 4449107fdf96SAdrian Chadd 4450f52efb6dSAdrian Chadd if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4451f52efb6dSAdrian Chadd csa_run_transition = 1; 4452f52efb6dSAdrian Chadd 44532e986da5SSam Leffler callout_drain(&sc->sc_cal_ch); 44545591b213SSam Leffler ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 44555591b213SSam Leffler 4456b032f27cSSam Leffler if (nstate == IEEE80211_S_SCAN) { 445758769f58SSam Leffler /* 4458b032f27cSSam Leffler * Scanning: turn off beacon miss and don't beacon. 4459b032f27cSSam Leffler * Mark beacon state so when we reach RUN state we'll 4460b032f27cSSam Leffler * [re]setup beacons. Unblock the task q thread so 4461b032f27cSSam Leffler * deferred interrupt processing is done. 446258769f58SSam Leffler */ 4463b032f27cSSam Leffler ath_hal_intrset(ah, 4464b032f27cSSam Leffler sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 44655591b213SSam Leffler sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4466b032f27cSSam Leffler sc->sc_beacons = 0; 4467b032f27cSSam Leffler taskqueue_unblock(sc->sc_tq); 44685591b213SSam Leffler } 44695591b213SSam Leffler 447080767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 447168e8e04eSSam Leffler rfilt = ath_calcrxfilter(sc); 4472b032f27cSSam Leffler stamode = (vap->iv_opmode == IEEE80211_M_STA || 44737b916f89SSam Leffler vap->iv_opmode == IEEE80211_M_AHDEMO || 4474b032f27cSSam Leffler vap->iv_opmode == IEEE80211_M_IBSS); 447568e8e04eSSam Leffler if (stamode && nstate == IEEE80211_S_RUN) { 447668e8e04eSSam Leffler sc->sc_curaid = ni->ni_associd; 447768e8e04eSSam Leffler IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4478b032f27cSSam Leffler ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4479b032f27cSSam Leffler } 448068e8e04eSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4481b032f27cSSam Leffler __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 448268e8e04eSSam Leffler ath_hal_setrxfilter(ah, rfilt); 448368e8e04eSSam Leffler 4484b032f27cSSam Leffler /* XXX is this to restore keycache on resume? */ 4485b032f27cSSam Leffler if (vap->iv_opmode != IEEE80211_M_STA && 4486b032f27cSSam Leffler (vap->iv_flags & IEEE80211_F_PRIVACY)) { 44875591b213SSam Leffler for (i = 0; i < IEEE80211_WEP_NKID; i++) 44885591b213SSam Leffler if (ath_hal_keyisvalid(ah, i)) 448968e8e04eSSam Leffler ath_hal_keysetmac(ah, i, ni->ni_bssid); 44905591b213SSam Leffler } 4491b032f27cSSam Leffler 4492b032f27cSSam Leffler /* 4493b032f27cSSam Leffler * Invoke the parent method to do net80211 work. 
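 * (av_newstate is the net80211 state handler previously saved in the
 * ath_vap; note it may end up dropping the comlock, hence the assert
 * that follows the call.)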
4494b032f27cSSam Leffler */ 4495b032f27cSSam Leffler error = avp->av_newstate(vap, nstate, arg); 4496b032f27cSSam Leffler if (error != 0) 4497b032f27cSSam Leffler goto bad; 4498c42a7b7eSSam Leffler 4499107fdf96SAdrian Chadd /* 4500107fdf96SAdrian Chadd * See above: ensure av_newstate() doesn't drop the lock 4501107fdf96SAdrian Chadd * on us. 4502107fdf96SAdrian Chadd */ 4503107fdf96SAdrian Chadd IEEE80211_LOCK_ASSERT(ic); 4504107fdf96SAdrian Chadd 450568e8e04eSSam Leffler if (nstate == IEEE80211_S_RUN) { 4506b032f27cSSam Leffler /* NB: collect bss node again, it may have changed */ 450780767531SAdrian Chadd ieee80211_free_node(ni); 450880767531SAdrian Chadd ni = ieee80211_ref_node(vap->iv_bss); 45095591b213SSam Leffler 4510b032f27cSSam Leffler DPRINTF(sc, ATH_DEBUG_STATE, 4511b032f27cSSam Leffler "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4512b032f27cSSam Leffler "capinfo 0x%04x chan %d\n", __func__, 4513b032f27cSSam Leffler vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4514b032f27cSSam Leffler ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4515b032f27cSSam Leffler 4516b032f27cSSam Leffler switch (vap->iv_opmode) { 4517584f7327SSam Leffler #ifdef IEEE80211_SUPPORT_TDMA 451810ad9a77SSam Leffler case IEEE80211_M_AHDEMO: 451910ad9a77SSam Leffler if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 452010ad9a77SSam Leffler break; 452110ad9a77SSam Leffler /* fall thru... */ 452210ad9a77SSam Leffler #endif 4523e8fd88a3SSam Leffler case IEEE80211_M_HOSTAP: 4524e8fd88a3SSam Leffler case IEEE80211_M_IBSS: 452559aa14a9SRui Paulo case IEEE80211_M_MBSS: 45265591b213SSam Leffler /* 4527e8fd88a3SSam Leffler * Allocate and setup the beacon frame. 4528e8fd88a3SSam Leffler * 4529f818612bSSam Leffler * Stop any previous beacon DMA. This may be 4530f818612bSSam Leffler * necessary, for example, when an ibss merge 4531f818612bSSam Leffler * causes reconfiguration; there will be a state 4532f818612bSSam Leffler * transition from RUN->RUN that means we may 4533f818612bSSam Leffler * be called with beacon transmission active. 4534f818612bSSam Leffler */ 4535f818612bSSam Leffler ath_hal_stoptxdma(ah, sc->sc_bhalq); 4536b032f27cSSam Leffler 45375591b213SSam Leffler error = ath_beacon_alloc(sc, ni); 45385591b213SSam Leffler if (error != 0) 45395591b213SSam Leffler goto bad; 45407a04dc27SSam Leffler /* 454180d939bfSSam Leffler * If joining an adhoc network defer beacon timer 454280d939bfSSam Leffler * configuration to the next beacon frame so we 454380d939bfSSam Leffler * have a current TSF to use. Otherwise we're 4544b032f27cSSam Leffler * starting an ibss/bss so there's no need to delay; 4545b032f27cSSam Leffler * if this is the first vap moving to RUN state, then 4546b032f27cSSam Leffler * beacon state needs to be [re]configured. 
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 */
			sc->sc_syncbeacon = 1;
			if (csa_run_transition)
				ath_beacon_config(sc, vap);
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shut down host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	ieee80211_free_node(ni);
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking?
		 */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}

/*
 * Set up driver-specific state for a newly associated node.
 * Note that we're also called on a re-associate; the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	ath_rate_newassoc(sc, an, isnew);
	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);
}

static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

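	/*
	 * Push the updated channel list and regulatory settings
	 * down to the HAL.
	 */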
	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}

static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros SKUs to net80211 SKUs */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ?
	    " ecm" : "");
	return 0;
}

static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	case IEEE80211_MODE_STURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_11NA:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
		break;
	case IEEE80211_MODE_11NG:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
		    __func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}

static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
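	 * The 11g case uses an 11b basic rate so that the legacy stations
	 * the protection is intended for can actually decode the frames.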
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}

#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
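		 * The caller supplies an opaque blob via ad_in_data and
		 * ad_in_size; bring it into kernel space before handing
		 * it to the HAL.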
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */
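
/*
 * Illustrative sketch (not part of the driver) of how a userland tool
 * might drive the SIOCGATHDIAG interface above.  The diag id and buffer
 * size below are hypothetical; real ids come from the HAL's private
 * diagnostic codes, and the exact struct ath_diag layout is defined in
 * if_athioctl.h.
 *
 *	struct ath_diag ad;
 *	char buf[4096];
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ad, 0, sizeof(ad));
 *	strlcpy(ad.ad_name, "ath0", sizeof(ad.ad_name));
 *	ad.ad_id = 1 | ATH_DIAG_DYN;	(hypothetical diag code)
 *	ad.ad_out_data = (caddr_t) buf;
 *	ad.ad_out_size = sizeof(buf);
 *	if (ioctl(s, SIOCGATHDIAG, &ad) < 0)
 *		err(1, "SIOCGATHDIAG");
 *
 * On return ad.ad_out_size is trimmed if the HAL produced less data
 * than the caller allowed for.
 */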

static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCGATHAGSTATS:
		return copyout(&sc->sc_aggr_stats,
		    ifr->ifr_data, sizeof (sc->sc_aggr_stats));
	case SIOCZATHSTATS:
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0) {
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
			memset(&sc->sc_aggr_stats, 0,
			    sizeof(sc->sc_aggr_stats));
			memset(&sc->sc_intr_stats, 0,
			    sizeof(sc->sc_intr_stats));
		}
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
		ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
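		 * For now net80211 is simply notified against the full
		 * current channel.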
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif