/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static void	ath_start(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_mode_init(struct ath_softc *);
static void	ath_setslottime(struct ath_softc *);
static void	ath_updateslot(struct ifnet *);
static int	ath_beaconq_setup(struct ath_hal *);
static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void	ath_beacon_update(struct ieee80211vap *, int item);
static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
			struct ieee80211vap *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_free(struct ath_softc *);
static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
			int subtype, int rssi, int nf);
static void	ath_setdefantenna(struct ath_softc *, u_int);
static void	ath_rx_proc(struct ath_softc *sc, int);
static void	ath_rx_tasklet(void *, int);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static int	ath_wme_update(struct ieee80211com *);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type);
static void	ath_stoprecv(struct ath_softc *, int);
static int	ath_startrecv(struct ath_softc *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
static void	ath_update_chw(struct ieee80211com *);
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);

#ifdef IEEE80211_SUPPORT_TDMA
static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
		    u_int32_t bintval);
static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
		    const struct ieee80211_tdma_state *tdma);
static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void	ath_tdma_update(struct ieee80211_node *ni,
		    const struct ieee80211_tdma_param *tdma, int);
static void	ath_tdma_beacon_send(struct ath_softc *sc,
		    struct ieee80211vap *vap);

#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
#define	TDMA_LPF(x, y, len) \
	((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */
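
/*
 * Worked example of the fixed-point averaging above (illustrative,
 * not part of the original sources): with TDMA_EP_MULTIPLIER = 1024
 * and TDMA_LPF_LEN = 6, feeding a raw sample of 5 into an accumulator
 * x via TDMA_SAMPLE(x, 5) computes
 *
 *	x = (x*5 + 5*1024) / 6
 *
 * (or seeds x = 5*1024 when x still holds TDMA_DUMMY_MARKER), i.e. an
 * exponential moving average kept scaled by 1024 so only integer math
 * is needed; TDMA_AVG(x) divides the scale factor back out, rounding
 * up whenever the remainder is at least half the multiplier.
 */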

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static	int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
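
/*
 * Example usage (not from the original sources): rxbuf and txbuf are
 * registered both as loader tunables and as read/write sysctls, so
 * they can be seeded from /boot/loader.conf, e.g.
 *
 *	hw.ath.rxbuf=64
 *	hw.ath.txbuf=512
 *
 * The static values are consumed when descriptors are allocated at
 * attach time, so changing the sysctls at runtime should only affect
 * devices attached afterwards.
 */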

#define	HAL_MODE_HT20	(HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
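	/*
	 * Illustration (not part of the original sources): for global
	 * key index i the hardware keeps TKIP MIC state at fixed
	 * offsets, so the loop above reserves slot i and slot i+64,
	 * plus slots i+32 and i+32+64 when the TX and RX MIC keys must
	 * occupy separate cache entries (sc_splitmic).  E.g. global
	 * key 0 pins slots 0, 32, 64 and 96.
	 */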
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}
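	/*
	 * Example (not from the original sources): the overrides above
	 * are read via resource_int_value(), so they can be supplied
	 * from /boot/device.hints, e.g. to force a single-chain RX/TX
	 * configuration on the first device:
	 *
	 *	hint.ath.0.rx_chainmask=1
	 *	hint.ath.0.tx_chainmask=1
	 */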
	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break anything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif
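	/*
	 * Illustration (not from the original sources): querying
	 * HAL_CAP_STREAMS with capability type 0 returns the TX stream
	 * count and type 1 the RX stream count, so a 2x2 part with
	 * chainmask 0x3 would typically report txs = rxs = 2 and
	 * advertise two-stream (MCS0-15) operation to net80211.
	 */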
	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (mp_ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	    0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
	ic->ic_update_chw = ath_update_chw;

	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
	    ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}
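
/*
 * Example (not from the original sources) of the derivation above:
 * if the EEPROM address is 00:11:22:33:44:55, the first clone takes
 * index 1 and mac[0] |= (1 << 2) | 0x2 yields 06:11:22:33:44:55,
 * index 2 yields 0a:11:22:33:44:55, and so on; setting the locally
 * administered (U/L) bit keeps the derived addresses out of the
 * globally assigned space.
 */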

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}
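
/*
 * Usage sketch (not from the original sources): this entry point is
 * reached through net80211 when a vap is cloned from userland, e.g.
 *
 *	ifconfig wlan0 create wlandev ath0 wlanmode hostap
 *
 * which arrives here with opmode IEEE80211_M_HOSTAP and, for the
 * first such vap, allocates a beacon slot via assign_bslot().
 */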

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
	ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
	if (ic->ic_opmode == IEEE80211_M_STA)
		ath_stop(ifp);
	else
		ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			ath_hal_setledstate(ah, HAL_LED_RUN);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}

	/* XXX beacons ? */
}

void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_KTR_INTR_DEBUG
	CTR5(ATH_KTR_INTR,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif
	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			CTR0(ATH_KTR_ERR, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process.  Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them.  This is the only place we should be
			 * doing this.
			 */
			ATH_PCU_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}
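
/*
 * Worked example (not from the original sources) of the timeout
 * above: the TSF advances in microseconds while ni_intval is in TU
 * (1 TU = 1024 us), so with iv_bmissthreshold = 7 and a typical
 * 100 TU beacon interval the phantom-bmiss window is
 * 7 * 100 * 1024 = 716800 us, i.e. roughly 0.7 seconds.
 */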
1753 */ 1754 static void 1755 ath_settkipmic(struct ath_softc *sc) 1756 { 1757 struct ifnet *ifp = sc->sc_ifp; 1758 struct ieee80211com *ic = ifp->if_l2com; 1759 1760 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1761 if (ic->ic_flags & IEEE80211_F_WME) { 1762 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1763 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1764 } else { 1765 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1766 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1767 } 1768 } 1769 } 1770 1771 static void 1772 ath_init(void *arg) 1773 { 1774 struct ath_softc *sc = (struct ath_softc *) arg; 1775 struct ifnet *ifp = sc->sc_ifp; 1776 struct ieee80211com *ic = ifp->if_l2com; 1777 struct ath_hal *ah = sc->sc_ah; 1778 HAL_STATUS status; 1779 1780 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1781 __func__, ifp->if_flags); 1782 1783 ATH_LOCK(sc); 1784 /* 1785 * Stop anything previously setup. This is safe 1786 * whether this is the first time through or not. 1787 */ 1788 ath_stop_locked(ifp); 1789 1790 /* 1791 * The basic interface to setting the hardware in a good 1792 * state is ``reset''. On return the hardware is known to 1793 * be powered up and with interrupts disabled. This must 1794 * be followed by initialization of the appropriate bits 1795 * and then setup of the interrupt mask. 1796 */ 1797 ath_settkipmic(sc); 1798 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1799 if_printf(ifp, "unable to reset hardware; hal status %u\n", 1800 status); 1801 ATH_UNLOCK(sc); 1802 return; 1803 } 1804 ath_chan_change(sc, ic->ic_curchan); 1805 1806 /* Let DFS at it in case it's a DFS channel */ 1807 ath_dfs_radar_enable(sc, ic->ic_curchan); 1808 1809 /* 1810 * Likewise this is set during reset so update 1811 * state cached in the driver. 1812 */ 1813 sc->sc_diversity = ath_hal_getdiversity(ah); 1814 sc->sc_lastlongcal = 0; 1815 sc->sc_resetcal = 1; 1816 sc->sc_lastcalreset = 0; 1817 sc->sc_lastani = 0; 1818 sc->sc_lastshortcal = 0; 1819 sc->sc_doresetcal = AH_FALSE; 1820 /* 1821 * Beacon timers were cleared here; give ath_newstate() 1822 * a hint that the beacon timers should be poked when 1823 * things transition to the RUN state. 1824 */ 1825 sc->sc_beacons = 0; 1826 1827 /* 1828 * Initial aggregation settings. 1829 */ 1830 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 1831 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 1832 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 1833 1834 /* 1835 * Setup the hardware after reset: the key cache 1836 * is filled as needed and the receive engine is 1837 * set going. Frame transmit is handled entirely 1838 * in the frame output path; there's nothing to do 1839 * here except setup the interrupt mask. 1840 */ 1841 if (ath_startrecv(sc) != 0) { 1842 if_printf(ifp, "unable to start recv logic\n"); 1843 ATH_UNLOCK(sc); 1844 return; 1845 } 1846 1847 /* 1848 * Enable interrupts. 1849 */ 1850 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1851 | HAL_INT_RXEOL | HAL_INT_RXORN 1852 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1853 /* 1854 * Enable MIB interrupts when there are hardware phy counters. 1855 * Note we only do this (at the moment) for station mode. 
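*
* As an illustration (not additional code): on a station-mode
* interface where sc_needmib is true and the chip supports the
* global TX timeout, the mask programmed below works out to
*
*   sc_imask = HAL_INT_RX | HAL_INT_TX
*            | HAL_INT_RXEOL | HAL_INT_RXORN
*            | HAL_INT_FATAL | HAL_INT_GLOBAL
*            | HAL_INT_MIB | HAL_INT_GTT;
*
* i.e. the base mask assembled above plus the two conditional
* bits added just below.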
1856 */
1857 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1858 sc->sc_imask |= HAL_INT_MIB;
1859
1860 /* Enable global TX timeout and carrier sense timeout if available */
1861 if (ath_hal_gtxto_supported(ah))
1862 sc->sc_imask |= HAL_INT_GTT;
1863
1864 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
1865 __func__, sc->sc_imask);
1866
1867 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1868 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1869 ath_hal_intrset(ah, sc->sc_imask);
1870
1871 ATH_UNLOCK(sc);
1872
1873 #ifdef ATH_TX99_DIAG
1874 if (sc->sc_tx99 != NULL)
1875 sc->sc_tx99->start(sc->sc_tx99);
1876 else
1877 #endif
1878 ieee80211_start_all(ic); /* start all vap's */
1879 }
1880
1881 static void
1882 ath_stop_locked(struct ifnet *ifp)
1883 {
1884 struct ath_softc *sc = ifp->if_softc;
1885 struct ath_hal *ah = sc->sc_ah;
1886
1887 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1888 __func__, sc->sc_invalid, ifp->if_flags);
1889
1890 ATH_LOCK_ASSERT(sc);
1891 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1892 /*
1893 * Shutdown the hardware and driver:
1894 * reset 802.11 state machine
1895 * turn off timers
1896 * disable interrupts
1897 * turn off the radio
1898 * clear transmit machinery
1899 * clear receive machinery
1900 * drain and release tx queues
1901 * reclaim beacon resources
1902 * power down hardware
1903 *
1904 * Note that some of this work is not possible if the
1905 * hardware is gone (invalid).
1906 */
1907 #ifdef ATH_TX99_DIAG
1908 if (sc->sc_tx99 != NULL)
1909 sc->sc_tx99->stop(sc->sc_tx99);
1910 #endif
1911 callout_stop(&sc->sc_wd_ch);
1912 sc->sc_wd_timer = 0;
1913 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1914 if (!sc->sc_invalid) {
1915 if (sc->sc_softled) {
1916 callout_stop(&sc->sc_ledtimer);
1917 ath_hal_gpioset(ah, sc->sc_ledpin,
1918 !sc->sc_ledon);
1919 sc->sc_blinking = 0;
1920 }
1921 ath_hal_intrset(ah, 0);
1922 }
1923 ath_draintxq(sc, ATH_RESET_DEFAULT);
1924 if (!sc->sc_invalid) {
1925 ath_stoprecv(sc, 1);
1926 ath_hal_phydisable(ah);
1927 } else
1928 sc->sc_rxlink = NULL;
1929 ath_beacon_free(sc); /* XXX not needed */
1930 }
1931 }
1932
1933 #define MAX_TXRX_ITERATIONS 1000
1934 static void
1935 ath_txrx_stop_locked(struct ath_softc *sc)
1936 {
1937 int i = MAX_TXRX_ITERATIONS;
1938
1939 ATH_UNLOCK_ASSERT(sc);
1940 ATH_PCU_LOCK_ASSERT(sc);
1941
1942 /*
1943 * Sleep until all the pending operations have completed.
1944 *
1945 * The caller must ensure that the reset counter has been
1946 * incremented or the pending operations may continue being queued.
1947 */
1948 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1949 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1950 if (i <= 0)
1951 break;
1952 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1953 i--;
1954 }
1955
1956 if (i <= 0)
1957 device_printf(sc->sc_dev,
1958 "%s: didn't finish after %d iterations\n",
1959 __func__, MAX_TXRX_ITERATIONS);
1960 }
1961 #undef MAX_TXRX_ITERATIONS
1962
1963 #if 0
1964 static void
1965 ath_txrx_stop(struct ath_softc *sc)
1966 {
1967 ATH_UNLOCK_ASSERT(sc);
1968 ATH_PCU_UNLOCK_ASSERT(sc);
1969
1970 ATH_PCU_LOCK(sc);
1971 ath_txrx_stop_locked(sc);
1972 ATH_PCU_UNLOCK(sc);
1973 }
1974 #endif
1975
1976 static void
1977 ath_txrx_start(struct ath_softc *sc)
1978 {
1979
1980 taskqueue_unblock(sc->sc_tq);
1981 }
1982
1983 /*
1984 * Grab the reset lock, and wait around until no one else
1985 * is trying to do anything with it.
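*
* A sketch of how ath_reset() below uses this; this is the
* existing sequence spelled out, not a new API:
*
*   ATH_PCU_LOCK(sc);
*   ath_hal_intrset(ah, 0);             quiesce interrupts
*   ath_txrx_stop_locked(sc);           wait for TX/RX to drain
*   ath_reset_grablock(sc, 1);          returns 0 on recursion
*   ATH_PCU_UNLOCK(sc);
*   ... do the actual reset work ...
*   ATH_PCU_LOCK(sc);
*   sc->sc_inreset_cnt--;               no release helper yet
*   ath_hal_intrset(ah, sc->sc_imask);
*   ATH_PCU_UNLOCK(sc);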
1986 *
1987 * This is totally horrible but we can't hold this lock for
1988 * long enough to do TX/RX or we end up with net80211/ip stack
1989 * LORs and eventual deadlock.
1990 *
1991 * "dowait" signals whether to spin, waiting for the reset
1992 * lock count to reach 0. This should (for now) only be used
1993 * during the reset path, as the rest of the code may not
1994 * be locking-reentrant enough to behave correctly.
1995 *
1996 * Another, cleaner way should be found to serialise all of
1997 * these operations.
1998 */
1999 #define MAX_RESET_ITERATIONS 10
2000 static int
2001 ath_reset_grablock(struct ath_softc *sc, int dowait)
2002 {
2003 int w = 0;
2004 int i = MAX_RESET_ITERATIONS;
2005
2006 ATH_PCU_LOCK_ASSERT(sc);
2007 do {
2008 if (sc->sc_inreset_cnt == 0) {
2009 w = 1;
2010 break;
2011 }
2012 if (dowait == 0) {
2013 w = 0;
2014 break;
2015 }
2016 ATH_PCU_UNLOCK(sc);
2017 pause("ath_reset_grablock", 1);
2018 i--;
2019 ATH_PCU_LOCK(sc);
2020 } while (i > 0);
2021
2022 /*
2023 * We always increment the refcounter, regardless
2024 * of whether we succeeded in getting it
2025 * exclusively.
2026 */
2027 sc->sc_inreset_cnt++;
2028
2029 if (i <= 0)
2030 device_printf(sc->sc_dev,
2031 "%s: didn't finish after %d iterations\n",
2032 __func__, MAX_RESET_ITERATIONS);
2033
2034 if (w == 0)
2035 device_printf(sc->sc_dev,
2036 "%s: warning, recursive reset path!\n",
2037 __func__);
2038
2039 return w;
2040 }
2041 #undef MAX_RESET_ITERATIONS
2042
2043 /*
2044 * XXX TODO: write ath_reset_releaselock
2045 */
2046
2047 static void
2048 ath_stop(struct ifnet *ifp)
2049 {
2050 struct ath_softc *sc = ifp->if_softc;
2051
2052 ATH_LOCK(sc);
2053 ath_stop_locked(ifp);
2054 ATH_UNLOCK(sc);
2055 }
2056
2057 /*
2058 * Reset the hardware w/o losing operational state. This is
2059 * basically a more efficient way of doing ath_stop, ath_init,
2060 * followed by state transitions to the current 802.11
2061 * operational state. Used to recover from various errors and
2062 * to reset or reload hardware state.
2063 */
2064 int
2065 ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2066 {
2067 struct ath_softc *sc = ifp->if_softc;
2068 struct ieee80211com *ic = ifp->if_l2com;
2069 struct ath_hal *ah = sc->sc_ah;
2070 HAL_STATUS status;
2071 int i;
2072
2073 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2074
2075 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2076 ATH_PCU_UNLOCK_ASSERT(sc);
2077 ATH_UNLOCK_ASSERT(sc);
2078
2079 /* Try to stop any further TX/RX from occurring */
2080 taskqueue_block(sc->sc_tq);
2081
2082 ATH_PCU_LOCK(sc);
2083 ath_hal_intrset(ah, 0); /* disable interrupts */
2084 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2085 if (ath_reset_grablock(sc, 1) == 0) {
2086 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2087 __func__);
2088 }
2089 ATH_PCU_UNLOCK(sc);
2090
2091 /*
2092 * Should now wait for pending TX/RX to complete
2093 * and block future ones from occurring. This needs to be
2094 * done before the TX queue is drained.
2095 */
2096 ath_draintxq(sc, reset_type); /* stop xmit side */
2097
2098 /*
2099 * Regardless of whether we're doing a no-loss flush or
2100 * not, stop the PCU and handle what's in the RX queue.
2101 * That way frames aren't dropped which shouldn't be.
2102 */
2103 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2104 ath_rx_proc(sc, 0);
2105
2106 ath_settkipmic(sc); /* configure TKIP MIC handling */
2107 /* NB: indicate channel change so we do a full reset */
2108 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2109 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2110 __func__, status);
2111 sc->sc_diversity = ath_hal_getdiversity(ah);
2112
2113 /* Let DFS at it in case it's a DFS channel */
2114 ath_dfs_radar_enable(sc, ic->ic_curchan);
2115
2116 if (ath_startrecv(sc) != 0) /* restart recv */
2117 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2118 /*
2119 * We may be doing a reset in response to an ioctl
2120 * that changes the channel so update any state that
2121 * might change as a result.
2122 */
2123 ath_chan_change(sc, ic->ic_curchan);
2124 if (sc->sc_beacons) { /* restart beacons */
2125 #ifdef IEEE80211_SUPPORT_TDMA
2126 if (sc->sc_tdma)
2127 ath_tdma_config(sc, NULL);
2128 else
2129 #endif
2130 ath_beacon_config(sc, NULL);
2131 }
2132
2133 /*
2134 * Release the reset lock and re-enable interrupts here.
2135 * If an interrupt was being processed in ath_intr(),
2136 * it would disable interrupts at this point. So we have
2137 * to atomically enable interrupts and decrement the
2138 * reset counter - this way ath_intr() doesn't end up
2139 * disabling interrupts without a corresponding enable
2140 * in the reset or channel change path.
2141 */
2142 ATH_PCU_LOCK(sc);
2143 sc->sc_inreset_cnt--;
2144 /* XXX only do this if sc_inreset_cnt == 0? */
2145 ath_hal_intrset(ah, sc->sc_imask);
2146 ATH_PCU_UNLOCK(sc);
2147
2148 /*
2149 * TX and RX can be started here. If it were started with
2150 * sc_inreset_cnt > 0, the TX and RX path would abort.
2151 * Thus if this is a nested call through the reset or
2152 * channel change code, TX completion will occur but
2153 * RX completion and ath_start / ath_tx_start will not
2154 * run.
2155 */
2156
2157 /* Restart TX/RX as needed */
2158 ath_txrx_start(sc);
2159
2160 /* XXX Restart TX completion and pending TX */
2161 if (reset_type == ATH_RESET_NOLOSS) {
2162 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2163 if (ATH_TXQ_SETUP(sc, i)) {
2164 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2165 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2166 ath_txq_sched(sc, &sc->sc_txq[i]);
2167 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2168 }
2169 }
2170 }
2171
2172 /*
2173 * This may have been set by an ath_start() call which
2174 * detected a concurrent TX was going on.
2175 * So, clear it.
2176 */
2177 IF_LOCK(&ifp->if_snd);
2178 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2179 IF_UNLOCK(&ifp->if_snd);
2180
2181 /* Handle any frames in the TX queue */
2182 /*
2183 * XXX should this be done by the caller, rather than
2184 * ath_reset()?
2185 */
2186 ath_start(ifp); /* restart xmit */
2187 return 0;
2188 }
2189
2190 static int
2191 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2192 {
2193 struct ieee80211com *ic = vap->iv_ic;
2194 struct ifnet *ifp = ic->ic_ifp;
2195 struct ath_softc *sc = ifp->if_softc;
2196 struct ath_hal *ah = sc->sc_ah;
2197
2198 switch (cmd) {
2199 case IEEE80211_IOC_TXPOWER:
2200 /*
2201 * If per-packet TPC is enabled, then we have nothing
2202 * to do; otherwise we need to force the global limit.
2203 * All this can happen directly; no need to reset.
2204 */
2205 if (!ath_hal_gettpc(ah))
2206 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2207 return 0;
2208 }
2209 /* XXX? Full or NOLOSS?
*/ 2210 return ath_reset(ifp, ATH_RESET_FULL); 2211 } 2212 2213 struct ath_buf * 2214 _ath_getbuf_locked(struct ath_softc *sc) 2215 { 2216 struct ath_buf *bf; 2217 2218 ATH_TXBUF_LOCK_ASSERT(sc); 2219 2220 bf = TAILQ_FIRST(&sc->sc_txbuf); 2221 if (bf == NULL) { 2222 sc->sc_stats.ast_tx_getnobuf++; 2223 } else { 2224 if (bf->bf_flags & ATH_BUF_BUSY) { 2225 sc->sc_stats.ast_tx_getbusybuf++; 2226 bf = NULL; 2227 } 2228 } 2229 2230 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) 2231 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 2232 else 2233 bf = NULL; 2234 2235 if (bf == NULL) { 2236 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 2237 TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 2238 "out of xmit buffers" : "xmit buffer busy"); 2239 return NULL; 2240 } 2241 2242 /* Valid bf here; clear some basic fields */ 2243 bf->bf_next = NULL; /* XXX just to be sure */ 2244 bf->bf_last = NULL; /* XXX again, just to be sure */ 2245 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2246 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2247 2248 return bf; 2249 } 2250 2251 /* 2252 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2253 * can't be thrown back on the queue as they could still be 2254 * in use by the hardware. 2255 * 2256 * This duplicates the buffer, or returns NULL. 2257 * 2258 * The descriptor is also copied but the link pointers and 2259 * the DMA segments aren't copied; this frame should thus 2260 * be again passed through the descriptor setup/chain routines 2261 * so the link is correct. 2262 * 2263 * The caller must free the buffer using ath_freebuf(). 2264 * 2265 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2266 * XXX in the TX pathway when retries are needed. 2267 * XXX Figure out how to keep some buffers free, or factor the 2268 * XXX number of busy buffers into the xmit path (ath_start()) 2269 * XXX so we don't over-commit. 2270 */ 2271 struct ath_buf * 2272 ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2273 { 2274 struct ath_buf *tbf; 2275 2276 tbf = ath_getbuf(sc); 2277 if (tbf == NULL) 2278 return NULL; /* XXX failure? Why? 
*/
2279
2280 /* Copy basics */
2281 tbf->bf_next = NULL;
2282 tbf->bf_nseg = bf->bf_nseg;
2283 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
2284 tbf->bf_status = bf->bf_status;
2285 tbf->bf_m = bf->bf_m;
2286 tbf->bf_node = bf->bf_node;
2287 /* will be setup by the chain/setup function */
2288 tbf->bf_lastds = NULL;
2289 /* for now, last == self */
2290 tbf->bf_last = tbf;
2291 tbf->bf_comp = bf->bf_comp;
2292
2293 /* NOTE: DMA segments will be setup by the setup/chain functions */
2294
2295 /* The caller has to re-init the descriptor + links */
2296
2297 /* Copy state */
2298 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2299
2300 return tbf;
2301 }
2302
2303 struct ath_buf *
2304 ath_getbuf(struct ath_softc *sc)
2305 {
2306 struct ath_buf *bf;
2307
2308 ATH_TXBUF_LOCK(sc);
2309 bf = _ath_getbuf_locked(sc);
2310 ATH_TXBUF_UNLOCK(sc);
2311 if (bf == NULL) {
2312 struct ifnet *ifp = sc->sc_ifp;
2313
2314 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2315 sc->sc_stats.ast_tx_qstop++;
2316 IF_LOCK(&ifp->if_snd);
2317 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2318 IF_UNLOCK(&ifp->if_snd);
2319 }
2320 return bf;
2321 }
2322
2323 static void
2324 ath_start(struct ifnet *ifp)
2325 {
2326 struct ath_softc *sc = ifp->if_softc;
2327 struct ieee80211_node *ni;
2328 struct ath_buf *bf;
2329 struct mbuf *m, *next;
2330 ath_bufhead frags;
2331
2332 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2333 return;
2334
2335 /* XXX is it ok to hold the ATH_LOCK here? */
2336 ATH_PCU_LOCK(sc);
2337 if (sc->sc_inreset_cnt > 0) {
2338 device_printf(sc->sc_dev,
2339 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2340 ATH_PCU_UNLOCK(sc);
2341 IF_LOCK(&ifp->if_snd);
2342 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2343 IF_UNLOCK(&ifp->if_snd);
2344 return;
2345 }
2346 sc->sc_txstart_cnt++;
2347 ATH_PCU_UNLOCK(sc);
2348
2349 for (;;) {
2350 /*
2351 * Grab a TX buffer and associated resources.
2352 */
2353 bf = ath_getbuf(sc);
2354 if (bf == NULL)
2355 break;
2356
2357 IFQ_DEQUEUE(&ifp->if_snd, m);
2358 if (m == NULL) {
2359 ATH_TXBUF_LOCK(sc);
2360 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2361 ATH_TXBUF_UNLOCK(sc);
2362 break;
2363 }
2364 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2365 /*
2366 * Check for fragmentation. If this frame
2367 * has been broken up verify we have enough
2368 * buffers to send all the fragments so all
2369 * go out or none...
2370 */
2371 TAILQ_INIT(&frags);
2372 if ((m->m_flags & M_FRAG) &&
2373 !ath_txfrag_setup(sc, &frags, m, ni)) {
2374 DPRINTF(sc, ATH_DEBUG_XMIT,
2375 "%s: out of txfrag buffers\n", __func__);
2376 sc->sc_stats.ast_tx_nofrag++;
2377 ifp->if_oerrors++;
2378 ath_freetx(m);
2379 goto bad;
2380 }
2381 ifp->if_opackets++;
2382 nextfrag:
2383 /*
2384 * Pass the frame to the h/w for transmission.
2385 * Fragmented frames have each frag chained together
2386 * with m_nextpkt. We know there are sufficient ath_buf's
2387 * to send all the frags because of work done by
2388 * ath_txfrag_setup. We leave m_nextpkt set while
2389 * calling ath_tx_start so it can use it to extend the
2390 * tx duration to cover the subsequent frag and
2391 * so it can reclaim all the mbufs in case of an error;
2392 * ath_tx_start clears m_nextpkt once it commits to
2393 * handing the frame to the hardware.
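*
* Worked example (illustrative): a frame fragmented into three
* pieces arrives here as m -> m2 -> m3, chained via m_nextpkt,
* with ath_txfrag_setup() having already reserved two extra
* ath_bufs on the local "frags" list. The code below then runs
* three times: m with the ath_buf obtained above, and m2 and m3
* each paired with a buffer popped off "frags", looping via the
* "nextfrag" label until m_nextpkt is NULL.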
2394 */ 2395 next = m->m_nextpkt; 2396 if (ath_tx_start(sc, ni, bf, m)) { 2397 bad: 2398 ifp->if_oerrors++; 2399 reclaim: 2400 bf->bf_m = NULL; 2401 bf->bf_node = NULL; 2402 ATH_TXBUF_LOCK(sc); 2403 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 2404 ath_txfrag_cleanup(sc, &frags, ni); 2405 ATH_TXBUF_UNLOCK(sc); 2406 if (ni != NULL) 2407 ieee80211_free_node(ni); 2408 continue; 2409 } 2410 if (next != NULL) { 2411 /* 2412 * Beware of state changing between frags. 2413 * XXX check sta power-save state? 2414 */ 2415 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 2416 DPRINTF(sc, ATH_DEBUG_XMIT, 2417 "%s: flush fragmented packet, state %s\n", 2418 __func__, 2419 ieee80211_state_name[ni->ni_vap->iv_state]); 2420 ath_freetx(next); 2421 goto reclaim; 2422 } 2423 m = next; 2424 bf = TAILQ_FIRST(&frags); 2425 KASSERT(bf != NULL, ("no buf for txfrag")); 2426 TAILQ_REMOVE(&frags, bf, bf_list); 2427 goto nextfrag; 2428 } 2429 2430 sc->sc_wd_timer = 5; 2431 } 2432 2433 ATH_PCU_LOCK(sc); 2434 sc->sc_txstart_cnt--; 2435 ATH_PCU_UNLOCK(sc); 2436 } 2437 2438 static int 2439 ath_media_change(struct ifnet *ifp) 2440 { 2441 int error = ieee80211_media_change(ifp); 2442 /* NB: only the fixed rate can change and that doesn't need a reset */ 2443 return (error == ENETRESET ? 0 : error); 2444 } 2445 2446 /* 2447 * Block/unblock tx+rx processing while a key change is done. 2448 * We assume the caller serializes key management operations 2449 * so we only need to worry about synchronization with other 2450 * uses that originate in the driver. 2451 */ 2452 static void 2453 ath_key_update_begin(struct ieee80211vap *vap) 2454 { 2455 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2456 struct ath_softc *sc = ifp->if_softc; 2457 2458 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2459 taskqueue_block(sc->sc_tq); 2460 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2461 } 2462 2463 static void 2464 ath_key_update_end(struct ieee80211vap *vap) 2465 { 2466 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2467 struct ath_softc *sc = ifp->if_softc; 2468 2469 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2470 IF_UNLOCK(&ifp->if_snd); 2471 taskqueue_unblock(sc->sc_tq); 2472 } 2473 2474 /* 2475 * Calculate the receive filter according to the 2476 * operating mode and state: 2477 * 2478 * o always accept unicast, broadcast, and multicast traffic 2479 * o accept PHY error frames when hardware doesn't have MIB support 2480 * to count and we need them for ANI (sta mode only until recently) 2481 * and we are not scanning (ANI is disabled) 2482 * NB: older hal's add rx filter bits out of sight and we need to 2483 * blindly preserve them 2484 * o probe request frames are accepted only when operating in 2485 * hostap, adhoc, mesh, or monitor modes 2486 * o enable promiscuous mode 2487 * - when in monitor mode 2488 * - if interface marked PROMISC (assumes bridge setting is filtered) 2489 * o accept beacons: 2490 * - when operating in station mode for collecting rssi data when 2491 * the station is otherwise quiet, or 2492 * - when operating in adhoc mode so the 802.11 layer creates 2493 * node table entries for peers, 2494 * - when scanning 2495 * - when doing s/w beacon miss (e.g. 
for ap+sta) 2496 * - when operating in ap mode in 11g to detect overlapping bss that 2497 * require protection 2498 * - when operating in mesh mode to detect neighbors 2499 * o accept control frames: 2500 * - when in monitor mode 2501 * XXX HT protection for 11n 2502 */ 2503 static u_int32_t 2504 ath_calcrxfilter(struct ath_softc *sc) 2505 { 2506 struct ifnet *ifp = sc->sc_ifp; 2507 struct ieee80211com *ic = ifp->if_l2com; 2508 u_int32_t rfilt; 2509 2510 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; 2511 if (!sc->sc_needmib && !sc->sc_scanning) 2512 rfilt |= HAL_RX_FILTER_PHYERR; 2513 if (ic->ic_opmode != IEEE80211_M_STA) 2514 rfilt |= HAL_RX_FILTER_PROBEREQ; 2515 /* XXX ic->ic_monvaps != 0? */ 2516 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) 2517 rfilt |= HAL_RX_FILTER_PROM; 2518 if (ic->ic_opmode == IEEE80211_M_STA || 2519 ic->ic_opmode == IEEE80211_M_IBSS || 2520 sc->sc_swbmiss || sc->sc_scanning) 2521 rfilt |= HAL_RX_FILTER_BEACON; 2522 /* 2523 * NB: We don't recalculate the rx filter when 2524 * ic_protmode changes; otherwise we could do 2525 * this only when ic_protmode != NONE. 2526 */ 2527 if (ic->ic_opmode == IEEE80211_M_HOSTAP && 2528 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) 2529 rfilt |= HAL_RX_FILTER_BEACON; 2530 2531 /* 2532 * Enable hardware PS-POLL RX only for hostap mode; 2533 * STA mode sends PS-POLL frames but never 2534 * receives them. 2535 */ 2536 if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 2537 0, NULL) == HAL_OK && 2538 ic->ic_opmode == IEEE80211_M_HOSTAP) 2539 rfilt |= HAL_RX_FILTER_PSPOLL; 2540 2541 if (sc->sc_nmeshvaps) { 2542 rfilt |= HAL_RX_FILTER_BEACON; 2543 if (sc->sc_hasbmatch) 2544 rfilt |= HAL_RX_FILTER_BSSID; 2545 else 2546 rfilt |= HAL_RX_FILTER_PROM; 2547 } 2548 if (ic->ic_opmode == IEEE80211_M_MONITOR) 2549 rfilt |= HAL_RX_FILTER_CONTROL; 2550 2551 /* 2552 * Enable RX of compressed BAR frames only when doing 2553 * 802.11n. Required for A-MPDU. 2554 */ 2555 if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) 2556 rfilt |= HAL_RX_FILTER_COMPBAR; 2557 2558 /* 2559 * Enable radar PHY errors if requested by the 2560 * DFS module. 2561 */ 2562 if (sc->sc_dodfs) 2563 rfilt |= HAL_RX_FILTER_PHYRADAR; 2564 2565 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", 2566 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); 2567 return rfilt; 2568 } 2569 2570 static void 2571 ath_update_promisc(struct ifnet *ifp) 2572 { 2573 struct ath_softc *sc = ifp->if_softc; 2574 u_int32_t rfilt; 2575 2576 /* configure rx filter */ 2577 rfilt = ath_calcrxfilter(sc); 2578 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2579 2580 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2581 } 2582 2583 static void 2584 ath_update_mcast(struct ifnet *ifp) 2585 { 2586 struct ath_softc *sc = ifp->if_softc; 2587 u_int32_t mfilt[2]; 2588 2589 /* calculate and install multicast filter */ 2590 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2591 struct ifmultiaddr *ifma; 2592 /* 2593 * Merge multicast addresses to form the hardware filter. 2594 */ 2595 mfilt[0] = mfilt[1] = 0; 2596 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? 
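*
* A worked example of the hash computed below (illustrative,
* not from the source): for the IPv4 all-hosts group address
* 01:00:5e:00:00:01,
*
*   val = LE_READ_4(dl + 0) = 0x005e0001
*   pos = (val>>18)^(val>>12)^(val>>6)^val = 0xf6 (low 8 bits)
*   val = LE_READ_4(dl + 3) = 0x00010000, plus bits beyond the
*         address which the final "pos &= 0x3f" masks away
*   pos ^= ... = 0xe6, then pos &= 0x3f gives 38
*
* so the address lands on bit 6 of mfilt[1].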
*/ 2597 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2598 caddr_t dl; 2599 u_int32_t val; 2600 u_int8_t pos; 2601 2602 /* calculate XOR of eight 6bit values */ 2603 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2604 val = LE_READ_4(dl + 0); 2605 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2606 val = LE_READ_4(dl + 3); 2607 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2608 pos &= 0x3f; 2609 mfilt[pos / 32] |= (1 << (pos % 32)); 2610 } 2611 if_maddr_runlock(ifp); 2612 } else 2613 mfilt[0] = mfilt[1] = ~0; 2614 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2615 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2616 __func__, mfilt[0], mfilt[1]); 2617 } 2618 2619 static void 2620 ath_mode_init(struct ath_softc *sc) 2621 { 2622 struct ifnet *ifp = sc->sc_ifp; 2623 struct ath_hal *ah = sc->sc_ah; 2624 u_int32_t rfilt; 2625 2626 /* configure rx filter */ 2627 rfilt = ath_calcrxfilter(sc); 2628 ath_hal_setrxfilter(ah, rfilt); 2629 2630 /* configure operational mode */ 2631 ath_hal_setopmode(ah); 2632 2633 /* handle any link-level address change */ 2634 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2635 2636 /* calculate and install multicast filter */ 2637 ath_update_mcast(ifp); 2638 } 2639 2640 /* 2641 * Set the slot time based on the current setting. 2642 */ 2643 static void 2644 ath_setslottime(struct ath_softc *sc) 2645 { 2646 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2647 struct ath_hal *ah = sc->sc_ah; 2648 u_int usec; 2649 2650 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 2651 usec = 13; 2652 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 2653 usec = 21; 2654 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 2655 /* honor short/long slot time only in 11g */ 2656 /* XXX shouldn't honor on pure g or turbo g channel */ 2657 if (ic->ic_flags & IEEE80211_F_SHSLOT) 2658 usec = HAL_SLOT_TIME_9; 2659 else 2660 usec = HAL_SLOT_TIME_20; 2661 } else 2662 usec = HAL_SLOT_TIME_9; 2663 2664 DPRINTF(sc, ATH_DEBUG_RESET, 2665 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 2666 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 2667 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 2668 2669 ath_hal_setslottime(ah, usec); 2670 sc->sc_updateslot = OK; 2671 } 2672 2673 /* 2674 * Callback from the 802.11 layer to update the 2675 * slot time based on the current setting. 2676 */ 2677 static void 2678 ath_updateslot(struct ifnet *ifp) 2679 { 2680 struct ath_softc *sc = ifp->if_softc; 2681 struct ieee80211com *ic = ifp->if_l2com; 2682 2683 /* 2684 * When not coordinating the BSS, change the hardware 2685 * immediately. For other operation we defer the change 2686 * until beacon updates have propagated to the stations. 2687 */ 2688 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 2689 ic->ic_opmode == IEEE80211_M_MBSS) 2690 sc->sc_updateslot = UPDATE; 2691 else 2692 ath_setslottime(sc); 2693 } 2694 2695 /* 2696 * Setup a h/w transmit queue for beacons. 2697 */ 2698 static int 2699 ath_beaconq_setup(struct ath_hal *ah) 2700 { 2701 HAL_TXQ_INFO qi; 2702 2703 memset(&qi, 0, sizeof(qi)); 2704 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 2705 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 2706 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 2707 /* NB: for dynamic turbo, don't enable any other interrupts */ 2708 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 2709 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 2710 } 2711 2712 /* 2713 * Setup the transmit queue parameters for the beacon queue. 
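*
* For example (illustrative, using the standard WME best-effort
* defaults logcwmin=4 and logcwmax=10): the adhoc branch below
* computes tqi_cwmin = 2*((1<<4)-1) = 30 and
* tqi_cwmax = (1<<10)-1 = 1023.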
2714 */
2715 static int
2716 ath_beaconq_config(struct ath_softc *sc)
2717 {
2718 #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2719 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2720 struct ath_hal *ah = sc->sc_ah;
2721 HAL_TXQ_INFO qi;
2722
2723 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2724 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2725 ic->ic_opmode == IEEE80211_M_MBSS) {
2726 /*
2727 * Always burst out beacon and CAB traffic.
2728 */
2729 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2730 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2731 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2732 } else {
2733 struct wmeParams *wmep =
2734 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2735 /*
2736 * Adhoc mode; important thing is to use 2x cwmin.
2737 */
2738 qi.tqi_aifs = wmep->wmep_aifsn;
2739 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2740 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2741 }
2742
2743 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2744 device_printf(sc->sc_dev, "unable to update parameters for "
2745 "beacon hardware queue!\n");
2746 return 0;
2747 } else {
2748 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2749 return 1;
2750 }
2751 #undef ATH_EXPONENT_TO_VALUE
2752 }
2753
2754 /*
2755 * Allocate and setup an initial beacon frame.
2756 */
2757 static int
2758 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2759 {
2760 struct ieee80211vap *vap = ni->ni_vap;
2761 struct ath_vap *avp = ATH_VAP(vap);
2762 struct ath_buf *bf;
2763 struct mbuf *m;
2764 int error;
2765
2766 bf = avp->av_bcbuf;
2767 DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
2768 __func__, bf->bf_m, bf->bf_node);
2769 if (bf->bf_m != NULL) {
2770 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2771 m_freem(bf->bf_m);
2772 bf->bf_m = NULL;
2773 }
2774 if (bf->bf_node != NULL) {
2775 ieee80211_free_node(bf->bf_node);
2776 bf->bf_node = NULL;
2777 }
2778
2779 /*
2780 * NB: the beacon data buffer must be 32-bit aligned;
2781 * we assume the mbuf routines will return us something
2782 * with this alignment (perhaps should assert).
2783 */
2784 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2785 if (m == NULL) {
2786 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2787 sc->sc_stats.ast_be_nombuf++;
2788 return ENOMEM;
2789 }
2790 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2791 bf->bf_segs, &bf->bf_nseg,
2792 BUS_DMA_NOWAIT);
2793 if (error != 0) {
2794 device_printf(sc->sc_dev,
2795 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2796 __func__, error);
2797 m_freem(m);
2798 return error;
2799 }
2800
2801 /*
2802 * Calculate a TSF adjustment factor required for staggered
2803 * beacons. Note that we assume the format of the beacon
2804 * frame leaves the tstamp field immediately following the
2805 * header.
2806 */
2807 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2808 uint64_t tsfadjust;
2809 struct ieee80211_frame *wh;
2810
2811 /*
2812 * The beacon interval is in TU's; the TSF is in usecs.
2813 * We figure out how many TU's to add to align the timestamp
2814 * then convert to TSF units and handle byte swapping before
2815 * inserting it in the frame. The hardware will then add this
2816 * each time a beacon frame is sent. Note that we align vap's
2817 * 1..N and leave vap 0 untouched. This means vap 0 has a
2818 * timestamp in one beacon interval while the others get a
2819 * timestamp aligned to the next interval.
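*
* Worked example (illustrative, with ATH_BCBUF = 4): for
* ni_intval = 100 TU and av_bslot = 1 the code below computes
* tsfadjust = 100 * (4 - 1) / 4 = 75 TU, converted to TSF
* units as 75 << 10 = 76800 usecs. Slot 2 would get 50 TU,
* slot 3 gets 25 TU, and slot 0 is left untouched.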
2820 */
2821 tsfadjust = ni->ni_intval *
2822 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2823 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2824
2825 DPRINTF(sc, ATH_DEBUG_BEACON,
2826 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2827 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2828 avp->av_bslot, ni->ni_intval,
2829 (long long unsigned) le64toh(tsfadjust));
2830
2831 wh = mtod(m, struct ieee80211_frame *);
2832 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2833 }
2834 bf->bf_m = m;
2835 bf->bf_node = ieee80211_ref_node(ni);
2836
2837 return 0;
2838 }
2839
2840 /*
2841 * Setup the beacon frame for transmit.
2842 */
2843 static void
2844 ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2845 {
2846 #define USE_SHPREAMBLE(_ic) \
2847 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2848 == IEEE80211_F_SHPREAMBLE)
2849 struct ieee80211_node *ni = bf->bf_node;
2850 struct ieee80211com *ic = ni->ni_ic;
2851 struct mbuf *m = bf->bf_m;
2852 struct ath_hal *ah = sc->sc_ah;
2853 struct ath_desc *ds;
2854 int flags, antenna;
2855 const HAL_RATE_TABLE *rt;
2856 u_int8_t rix, rate;
2857
2858 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2859 __func__, m, m->m_len);
2860
2861 /* setup descriptors */
2862 ds = bf->bf_desc;
2863 bf->bf_last = bf;
2864 bf->bf_lastds = ds;
2865
2866 flags = HAL_TXDESC_NOACK;
2867 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2868 ds->ds_link = bf->bf_daddr; /* self-linked */
2869 flags |= HAL_TXDESC_VEOL;
2870 /*
2871 * Let hardware handle antenna switching.
2872 */
2873 antenna = sc->sc_txantenna;
2874 } else {
2875 ds->ds_link = 0;
2876 /*
2877 * Switch antenna every 4 beacons.
2878 * XXX assumes two antennas
2879 */
2880 if (sc->sc_txantenna != 0)
2881 antenna = sc->sc_txantenna;
2882 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2883 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2884 else
2885 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2886 }
2887
2888 KASSERT(bf->bf_nseg == 1,
2889 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2890 ds->ds_data = bf->bf_segs[0].ds_addr;
2891 /*
2892 * Calculate rate code.
2893 * XXX everything at min xmit rate
2894 */
2895 rix = 0;
2896 rt = sc->sc_currates;
2897 rate = rt->info[rix].rateCode;
2898 if (USE_SHPREAMBLE(ic))
2899 rate |= rt->info[rix].shortPreamble;
2900 ath_hal_setuptxdesc(ah, ds
2901 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2902 , sizeof(struct ieee80211_frame)/* header length */
2903 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2904 , ni->ni_txpower /* txpower XXX */
2905 , rate, 1 /* series 0 rate/tries */
2906 , HAL_TXKEYIX_INVALID /* no encryption */
2907 , antenna /* antenna mode */
2908 , flags /* no ack, veol for beacons */
2909 , 0 /* rts/cts rate */
2910 , 0 /* rts/cts duration */
2911 );
2912 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2913 ath_hal_filltxdesc(ah, ds
2914 , roundup(m->m_len, 4) /* buffer length */
2915 , AH_TRUE /* first segment */
2916 , AH_TRUE /* last segment */
2917 , ds /* first descriptor */
2918 );
2919 #if 0
2920 ath_desc_swap(ds);
2921 #endif
2922 #undef USE_SHPREAMBLE
2923 }
2924
2925 static void
2926 ath_beacon_update(struct ieee80211vap *vap, int item)
2927 {
2928 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2929
2930 setbit(bo->bo_flags, item);
2931 }
2932
2933 /*
2934 * Append the contents of src to dst; both queues
2935 * are assumed to be locked.
2936 */ 2937 static void 2938 ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 2939 { 2940 2941 ATH_TXQ_LOCK_ASSERT(dst); 2942 ATH_TXQ_LOCK_ASSERT(src); 2943 2944 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 2945 dst->axq_link = src->axq_link; 2946 src->axq_link = NULL; 2947 dst->axq_depth += src->axq_depth; 2948 dst->axq_aggr_depth += src->axq_aggr_depth; 2949 src->axq_depth = 0; 2950 src->axq_aggr_depth = 0; 2951 } 2952 2953 /* 2954 * Transmit a beacon frame at SWBA. Dynamic updates to the 2955 * frame contents are done as needed and the slot time is 2956 * also adjusted based on current state. 2957 */ 2958 static void 2959 ath_beacon_proc(void *arg, int pending) 2960 { 2961 struct ath_softc *sc = arg; 2962 struct ath_hal *ah = sc->sc_ah; 2963 struct ieee80211vap *vap; 2964 struct ath_buf *bf; 2965 int slot, otherant; 2966 uint32_t bfaddr; 2967 2968 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", 2969 __func__, pending); 2970 /* 2971 * Check if the previous beacon has gone out. If 2972 * not don't try to post another, skip this period 2973 * and wait for the next. Missed beacons indicate 2974 * a problem and should not occur. If we miss too 2975 * many consecutive beacons reset the device. 2976 */ 2977 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 2978 sc->sc_bmisscount++; 2979 sc->sc_stats.ast_be_missed++; 2980 DPRINTF(sc, ATH_DEBUG_BEACON, 2981 "%s: missed %u consecutive beacons\n", 2982 __func__, sc->sc_bmisscount); 2983 if (sc->sc_bmisscount >= ath_bstuck_threshold) 2984 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 2985 return; 2986 } 2987 if (sc->sc_bmisscount != 0) { 2988 DPRINTF(sc, ATH_DEBUG_BEACON, 2989 "%s: resume beacon xmit after %u misses\n", 2990 __func__, sc->sc_bmisscount); 2991 sc->sc_bmisscount = 0; 2992 } 2993 2994 if (sc->sc_stagbeacons) { /* staggered beacons */ 2995 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 2996 uint32_t tsftu; 2997 2998 tsftu = ath_hal_gettsf32(ah) >> 10; 2999 /* XXX lintval */ 3000 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; 3001 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; 3002 bfaddr = 0; 3003 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 3004 bf = ath_beacon_generate(sc, vap); 3005 if (bf != NULL) 3006 bfaddr = bf->bf_daddr; 3007 } 3008 } else { /* burst'd beacons */ 3009 uint32_t *bflink = &bfaddr; 3010 3011 for (slot = 0; slot < ATH_BCBUF; slot++) { 3012 vap = sc->sc_bslot[slot]; 3013 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) { 3014 bf = ath_beacon_generate(sc, vap); 3015 if (bf != NULL) { 3016 *bflink = bf->bf_daddr; 3017 bflink = &bf->bf_desc->ds_link; 3018 } 3019 } 3020 } 3021 *bflink = 0; /* terminate list */ 3022 } 3023 3024 /* 3025 * Handle slot time change when a non-ERP station joins/leaves 3026 * an 11g network. The 802.11 layer notifies us via callback, 3027 * we mark updateslot, then wait one beacon before effecting 3028 * the change. This gives associated stations at least one 3029 * beacon interval to note the state change. 3030 */ 3031 /* XXX locking */ 3032 if (sc->sc_updateslot == UPDATE) { 3033 sc->sc_updateslot = COMMIT; /* commit next beacon */ 3034 sc->sc_slotupdate = slot; 3035 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 3036 ath_setslottime(sc); /* commit change to h/w */ 3037 3038 /* 3039 * Check recent per-antenna transmit statistics and flip 3040 * the default antenna if noticeably more frames went out 3041 * on the non-default antenna. 
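*
* For example (illustrative): with sc_defant = 1, if the check
* below finds sc_ant_tx[2] = 40 and sc_ant_tx[1] = 30, then
* 40 > 30 + 2 holds and the default antenna is flipped to 2;
* both counters are then zeroed for the next sampling interval.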
3042 * XXX assumes 2 antennas
3043 */
3044 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
3045 otherant = sc->sc_defant & 1 ? 2 : 1;
3046 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
3047 ath_setdefantenna(sc, otherant);
3048 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
3049 }
3050
3051 if (bfaddr != 0) {
3052 /*
3053 * Stop any current dma and put the new frame on the queue.
3054 * This should never fail since we check above that no frames
3055 * are still pending on the queue.
3056 */
3057 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
3058 DPRINTF(sc, ATH_DEBUG_ANY,
3059 "%s: beacon queue %u did not stop?\n",
3060 __func__, sc->sc_bhalq);
3061 }
3062 /* NB: cabq traffic should already be queued and primed */
3063 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
3064 ath_hal_txstart(ah, sc->sc_bhalq);
3065
3066 sc->sc_stats.ast_be_xmit++;
3067 }
3068 }
3069
3070 static struct ath_buf *
3071 ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
3072 {
3073 struct ath_vap *avp = ATH_VAP(vap);
3074 struct ath_txq *cabq = sc->sc_cabq;
3075 struct ath_buf *bf;
3076 struct mbuf *m;
3077 int nmcastq, error;
3078
3079 KASSERT(vap->iv_state >= IEEE80211_S_RUN,
3080 ("not running, state %d", vap->iv_state));
3081 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3082
3083 /*
3084 * Update dynamic beacon contents. If this returns
3085 * non-zero then we need to remap the memory because
3086 * the beacon frame changed size (probably because
3087 * of the TIM bitmap).
3088 */
3089 bf = avp->av_bcbuf;
3090 m = bf->bf_m;
3091 /* XXX lock mcastq? */
3092 nmcastq = avp->av_mcastq.axq_depth;
3093
3094 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
3095 /* XXX too conservative? */
3096 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3097 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3098 bf->bf_segs, &bf->bf_nseg,
3099 BUS_DMA_NOWAIT);
3100 if (error != 0) {
3101 if_printf(vap->iv_ifp,
3102 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3103 __func__, error);
3104 return NULL;
3105 }
3106 }
3107 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
3108 DPRINTF(sc, ATH_DEBUG_BEACON,
3109 "%s: cabq did not drain, mcastq %u cabq %u\n",
3110 __func__, nmcastq, cabq->axq_depth);
3111 sc->sc_stats.ast_cabq_busy++;
3112 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
3113 /*
3114 * CABQ traffic from a previous vap is still pending.
3115 * We must drain the q before this beacon frame goes
3116 * out as otherwise this vap's stations will get cab
3117 * frames from a different vap.
3118 * XXX could be slow causing us to miss DBA
3119 */
3120 ath_tx_draintxq(sc, cabq);
3121 }
3122 }
3123 ath_beacon_setup(sc, bf);
3124 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3125
3126 /*
3127 * Enable the CAB queue before the beacon queue to
3128 * ensure cab frames are triggered by this beacon.
3129 */
3130 if (avp->av_boff.bo_tim[4] & 1) {
3131 struct ath_hal *ah = sc->sc_ah;
3132
3133 /* NB: only at DTIM */
3134 ATH_TXQ_LOCK(cabq);
3135 ATH_TXQ_LOCK(&avp->av_mcastq);
3136 if (nmcastq) {
3137 struct ath_buf *bfm;
3138
3139 /*
3140 * Move frames from the s/w mcast q to the h/w cab q.
3141 * XXX MORE_DATA bit
3142 */
3143 bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
3144 if (cabq->axq_link != NULL) {
3145 *cabq->axq_link = bfm->bf_daddr;
3146 } else
3147 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3148 bfm->bf_daddr);
3149 ath_txqmove(cabq, &avp->av_mcastq);
3150
3151 sc->sc_stats.ast_cabq_xmit += nmcastq;
3152 }
3153 /* NB: gated by beacon so safe to start here */
3154 if (! TAILQ_EMPTY(&(cabq->axq_q)))
3155 ath_hal_txstart(ah, cabq->axq_qnum);
3156 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3157 ATH_TXQ_UNLOCK(cabq);
3158 }
3159 return bf;
3160 }
3161
3162 static void
3163 ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3164 {
3165 struct ath_vap *avp = ATH_VAP(vap);
3166 struct ath_hal *ah = sc->sc_ah;
3167 struct ath_buf *bf;
3168 struct mbuf *m;
3169 int error;
3170
3171 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3172
3173 /*
3174 * Update dynamic beacon contents. If this returns
3175 * non-zero then we need to remap the memory because
3176 * the beacon frame changed size (probably because
3177 * of the TIM bitmap).
3178 */
3179 bf = avp->av_bcbuf;
3180 m = bf->bf_m;
3181 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3182 /* XXX too conservative? */
3183 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3184 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3185 bf->bf_segs, &bf->bf_nseg,
3186 BUS_DMA_NOWAIT);
3187 if (error != 0) {
3188 if_printf(vap->iv_ifp,
3189 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3190 __func__, error);
3191 return;
3192 }
3193 }
3194 ath_beacon_setup(sc, bf);
3195 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3196
3197 /* NB: caller is known to have already stopped tx dma */
3198 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3199 ath_hal_txstart(ah, sc->sc_bhalq);
3200 }
3201
3202 /*
3203 * Reset the hardware, with no loss.
3204 *
3205 * This can't be used for a general case reset.
3206 */
3207 static void
3208 ath_reset_proc(void *arg, int pending)
3209 {
3210 struct ath_softc *sc = arg;
3211 struct ifnet *ifp = sc->sc_ifp;
3212
3213 #if 0
3214 if_printf(ifp, "%s: resetting\n", __func__);
3215 #endif
3216 ath_reset(ifp, ATH_RESET_NOLOSS);
3217 }
3218
3219 /*
3220 * Reset the hardware after detecting beacons have stopped.
3221 */
3222 static void
3223 ath_bstuck_proc(void *arg, int pending)
3224 {
3225 struct ath_softc *sc = arg;
3226 struct ifnet *ifp = sc->sc_ifp;
3227 uint32_t hangs = 0;
3228
3229 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3230 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3231
3232 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3233 sc->sc_bmisscount);
3234 sc->sc_stats.ast_bstuck++;
3235 /*
3236 * This assumes that there's no simultaneous channel mode change
3237 * occurring.
3238 */
3239 ath_reset(ifp, ATH_RESET_NOLOSS);
3240 }
3241
3242 /*
3243 * Reclaim beacon resources and return buffer to the pool.
3244 */
3245 static void
3246 ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3247 {
3248
3249 DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
3250 __func__, bf, bf->bf_m, bf->bf_node);
3251 if (bf->bf_m != NULL) {
3252 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3253 m_freem(bf->bf_m);
3254 bf->bf_m = NULL;
3255 }
3256 if (bf->bf_node != NULL) {
3257 ieee80211_free_node(bf->bf_node);
3258 bf->bf_node = NULL;
3259 }
3260 TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3261 }
3262
3263 /*
3264 * Reclaim beacon resources.
3265 */ 3266 static void 3267 ath_beacon_free(struct ath_softc *sc) 3268 { 3269 struct ath_buf *bf; 3270 3271 TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 3272 DPRINTF(sc, ATH_DEBUG_NODE, 3273 "%s: free bf=%p, bf_m=%p, bf_node=%p\n", 3274 __func__, bf, bf->bf_m, bf->bf_node); 3275 if (bf->bf_m != NULL) { 3276 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3277 m_freem(bf->bf_m); 3278 bf->bf_m = NULL; 3279 } 3280 if (bf->bf_node != NULL) { 3281 ieee80211_free_node(bf->bf_node); 3282 bf->bf_node = NULL; 3283 } 3284 } 3285 } 3286 3287 /* 3288 * Configure the beacon and sleep timers. 3289 * 3290 * When operating as an AP this resets the TSF and sets 3291 * up the hardware to notify us when we need to issue beacons. 3292 * 3293 * When operating in station mode this sets up the beacon 3294 * timers according to the timestamp of the last received 3295 * beacon and the current TSF, configures PCF and DTIM 3296 * handling, programs the sleep registers so the hardware 3297 * will wakeup in time to receive beacons, and configures 3298 * the beacon miss handling so we'll receive a BMISS 3299 * interrupt when we stop seeing beacons from the AP 3300 * we've associated with. 3301 */ 3302 static void 3303 ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) 3304 { 3305 #define TSF_TO_TU(_h,_l) \ 3306 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 3307 #define FUDGE 2 3308 struct ath_hal *ah = sc->sc_ah; 3309 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3310 struct ieee80211_node *ni; 3311 u_int32_t nexttbtt, intval, tsftu; 3312 u_int64_t tsf; 3313 3314 if (vap == NULL) 3315 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 3316 ni = ieee80211_ref_node(vap->iv_bss); 3317 3318 /* extract tstamp from last beacon and convert to TU */ 3319 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), 3320 LE_READ_4(ni->ni_tstamp.data)); 3321 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3322 ic->ic_opmode == IEEE80211_M_MBSS) { 3323 /* 3324 * For multi-bss ap/mesh support beacons are either staggered 3325 * evenly over N slots or burst together. For the former 3326 * arrange for the SWBA to be delivered for each slot. 3327 * Slots that are not occupied will generate nothing. 3328 */ 3329 /* NB: the beacon interval is kept internally in TU's */ 3330 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3331 if (sc->sc_stagbeacons) 3332 intval /= ATH_BCBUF; 3333 } else { 3334 /* NB: the beacon interval is kept internally in TU's */ 3335 intval = ni->ni_intval & HAL_BEACON_PERIOD; 3336 } 3337 if (nexttbtt == 0) /* e.g. for ap mode */ 3338 nexttbtt = intval; 3339 else if (intval) /* NB: can be 0 for monitor mode */ 3340 nexttbtt = roundup(nexttbtt, intval); 3341 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 3342 __func__, nexttbtt, intval, ni->ni_intval); 3343 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { 3344 HAL_BEACON_STATE bs; 3345 int dtimperiod, dtimcount; 3346 int cfpperiod, cfpcount; 3347 3348 /* 3349 * Setup dtim and cfp parameters according to 3350 * last beacon we received (which may be none). 3351 */ 3352 dtimperiod = ni->ni_dtim_period; 3353 if (dtimperiod <= 0) /* NB: 0 if not known */ 3354 dtimperiod = 1; 3355 dtimcount = ni->ni_dtim_count; 3356 if (dtimcount >= dtimperiod) /* NB: sanity check */ 3357 dtimcount = 0; /* XXX? */ 3358 cfpperiod = 1; /* NB: no PCF support yet */ 3359 cfpcount = 0; 3360 /* 3361 * Pull nexttbtt forward to reflect the current 3362 * TSF and calculate dtim+cfp state for the result. 
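*
* TSF_TO_TU() above simply divides the 64-bit microsecond TSF
* by 1024: (hi << 22) | (lo >> 10). Worked example
* (illustrative): with intval = 100 TU, nexttbtt = 5000 from
* the last beacon and tsftu = 5434, the loop below advances
* nexttbtt through 5100, 5200, ... and stops at 5500, the
* first TBTT in the future, updating the dtim/cfp counters
* along the way.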
3363 */
3364 tsf = ath_hal_gettsf64(ah);
3365 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3366 do {
3367 nexttbtt += intval;
3368 if (--dtimcount < 0) {
3369 dtimcount = dtimperiod - 1;
3370 if (--cfpcount < 0)
3371 cfpcount = cfpperiod - 1;
3372 }
3373 } while (nexttbtt < tsftu);
3374 memset(&bs, 0, sizeof(bs));
3375 bs.bs_intval = intval;
3376 bs.bs_nexttbtt = nexttbtt;
3377 bs.bs_dtimperiod = dtimperiod*intval;
3378 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3379 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3380 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3381 bs.bs_cfpmaxduration = 0;
3382 #if 0
3383 /*
3384 * The 802.11 layer records the offset to the DTIM
3385 * bitmap while receiving beacons; use it here to
3386 * enable h/w detection of our AID being marked in
3387 * the bitmap vector (to indicate frames for us are
3388 * pending at the AP).
3389 * XXX do DTIM handling in s/w to WAR old h/w bugs
3390 * XXX enable based on h/w rev for newer chips
3391 */
3392 bs.bs_timoffset = ni->ni_timoff;
3393 #endif
3394 /*
3395 * Calculate the number of consecutive beacons to miss
3396 * before taking a BMISS interrupt.
3397 * Note that we clamp the result to at most 10 beacons.
3398 */
3399 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3400 if (bs.bs_bmissthreshold > 10)
3401 bs.bs_bmissthreshold = 10;
3402 else if (bs.bs_bmissthreshold <= 0)
3403 bs.bs_bmissthreshold = 1;
3404
3405 /*
3406 * Calculate sleep duration. The configuration is
3407 * given in ms. We ensure a multiple of the beacon
3408 * period is used. Also, if the sleep duration is
3409 * greater than the DTIM period then it makes sense
3410 * to make it a multiple of that.
3411 *
3412 * XXX fixed at 100ms
3413 */
3414 bs.bs_sleepduration =
3415 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3416 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3417 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3418
3419 DPRINTF(sc, ATH_DEBUG_BEACON,
3420 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3421 , __func__
3422 , tsf, tsftu
3423 , bs.bs_intval
3424 , bs.bs_nexttbtt
3425 , bs.bs_dtimperiod
3426 , bs.bs_nextdtim
3427 , bs.bs_bmissthreshold
3428 , bs.bs_sleepduration
3429 , bs.bs_cfpperiod
3430 , bs.bs_cfpmaxduration
3431 , bs.bs_cfpnext
3432 , bs.bs_timoffset
3433 );
3434 ath_hal_intrset(ah, 0);
3435 ath_hal_beacontimers(ah, &bs);
3436 sc->sc_imask |= HAL_INT_BMISS;
3437 ath_hal_intrset(ah, sc->sc_imask);
3438 } else {
3439 ath_hal_intrset(ah, 0);
3440 if (nexttbtt == intval)
3441 intval |= HAL_BEACON_RESET_TSF;
3442 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3443 /*
3444 * In IBSS mode enable the beacon timers but only
3445 * enable SWBA interrupts if we need to manually
3446 * prepare beacon frames. Otherwise we use a
3447 * self-linked tx descriptor and let the hardware
3448 * deal with things.
3449 */
3450 intval |= HAL_BEACON_ENA;
3451 if (!sc->sc_hasveol)
3452 sc->sc_imask |= HAL_INT_SWBA;
3453 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3454 /*
3455 * Pull nexttbtt forward to reflect
3456 * the current TSF.
3457 */
3458 tsf = ath_hal_gettsf64(ah);
3459 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3460 do {
3461 nexttbtt += intval;
3462 } while (nexttbtt < tsftu);
3463 }
3464 ath_beaconq_config(sc);
3465 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3466 ic->ic_opmode == IEEE80211_M_MBSS) {
3467 /*
3468 * In AP/mesh mode we enable the beacon timers
3469 * and SWBA interrupts to prepare beacon frames.
3470 */ 3471 intval |= HAL_BEACON_ENA; 3472 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 3473 ath_beaconq_config(sc); 3474 } 3475 ath_hal_beaconinit(ah, nexttbtt, intval); 3476 sc->sc_bmisscount = 0; 3477 ath_hal_intrset(ah, sc->sc_imask); 3478 /* 3479 * When using a self-linked beacon descriptor in 3480 * ibss mode load it once here. 3481 */ 3482 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 3483 ath_beacon_start_adhoc(sc, vap); 3484 } 3485 sc->sc_syncbeacon = 0; 3486 ieee80211_free_node(ni); 3487 #undef FUDGE 3488 #undef TSF_TO_TU 3489 } 3490 3491 static void 3492 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3493 { 3494 bus_addr_t *paddr = (bus_addr_t*) arg; 3495 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 3496 *paddr = segs->ds_addr; 3497 } 3498 3499 static int 3500 ath_descdma_setup(struct ath_softc *sc, 3501 struct ath_descdma *dd, ath_bufhead *head, 3502 const char *name, int nbuf, int ndesc) 3503 { 3504 #define DS2PHYS(_dd, _ds) \ 3505 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3506 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 3507 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3508 struct ifnet *ifp = sc->sc_ifp; 3509 uint8_t *ds; 3510 struct ath_buf *bf; 3511 int i, bsize, error; 3512 int desc_len; 3513 3514 desc_len = sizeof(struct ath_desc); 3515 3516 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 3517 __func__, name, nbuf, ndesc); 3518 3519 dd->dd_name = name; 3520 dd->dd_desc_len = desc_len * nbuf * ndesc; 3521 3522 /* 3523 * Merlin work-around: 3524 * Descriptors that cross the 4KB boundary can't be used. 3525 * Assume one skipped descriptor per 4KB page. 3526 */ 3527 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3528 int numdescpage = 4096 / (desc_len * ndesc); 3529 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 3530 } 3531 3532 /* 3533 * Setup DMA descriptor area. 
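*
* Worked example of the Merlin sizing above (illustrative,
* assuming a hypothetical desc_len * ndesc of 64 bytes): one
* 4KB page holds numdescpage = 4096/64 = 64 descriptor blocks,
* so nbuf = 512 buffers need 512/64 + 1 = 9 pages and
* dd_desc_len = 9 * 4096 = 36864 bytes, leaving room for the
* descriptor skipped at each page boundary.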
3534 */
3535 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3536 PAGE_SIZE, 0, /* alignment, bounds */
3537 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3538 BUS_SPACE_MAXADDR, /* highaddr */
3539 NULL, NULL, /* filter, filterarg */
3540 dd->dd_desc_len, /* maxsize */
3541 1, /* nsegments */
3542 dd->dd_desc_len, /* maxsegsize */
3543 BUS_DMA_ALLOCNOW, /* flags */
3544 NULL, /* lockfunc */
3545 NULL, /* lockarg */
3546 &dd->dd_dmat);
3547 if (error != 0) {
3548 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3549 return error;
3550 }
3551
3552 /* allocate descriptors */
3553 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3554 if (error != 0) {
3555 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3556 "error %u\n", dd->dd_name, error);
3557 goto fail0;
3558 }
3559
3560 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3561 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3562 &dd->dd_dmamap);
3563 if (error != 0) {
3564 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3565 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3566 goto fail1;
3567 }
3568
3569 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3570 dd->dd_desc, dd->dd_desc_len,
3571 ath_load_cb, &dd->dd_desc_paddr,
3572 BUS_DMA_NOWAIT);
3573 if (error != 0) {
3574 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3575 dd->dd_name, error);
3576 goto fail2;
3577 }
3578
3579 ds = (uint8_t *) dd->dd_desc;
3580 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3581 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3582 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3583
3584 /* allocate buffers */
3585 bsize = sizeof(struct ath_buf) * nbuf;
3586 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3587 if (bf == NULL) {
3588 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3589 dd->dd_name, bsize);
error = ENOMEM; /* NB: error is still 0 from the successful load above */
3590 goto fail3;
3591 }
3592 dd->dd_bufptr = bf;
3593
3594 TAILQ_INIT(head);
3595 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
3596 bf->bf_desc = (struct ath_desc *) ds;
3597 bf->bf_daddr = DS2PHYS(dd, ds);
3598 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3599 /*
3600 * Merlin WAR: Skip descriptor addresses which
3601 * cause 4KB boundary crossing along any point
3602 * in the descriptor.
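*
* For example (illustrative): a descriptor block of len = 0x40
* bytes whose bf_daddr ends in 0xfe0 trips the check below,
* since 0xfe0 > 0x1000 - 0x40 = 0xfc0; ds is then advanced by
* 0x1000 - 0xfe0 = 0x20 bytes so the block starts on the next
* 4KB page and bf_daddr is recomputed there.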
3603 */ 3604 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 3605 desc_len * ndesc)) { 3606 /* Start at the next page */ 3607 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 3608 bf->bf_desc = (struct ath_desc *) ds; 3609 bf->bf_daddr = DS2PHYS(dd, ds); 3610 } 3611 } 3612 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3613 &bf->bf_dmamap); 3614 if (error != 0) { 3615 if_printf(ifp, "unable to create dmamap for %s " 3616 "buffer %u, error %u\n", dd->dd_name, i, error); 3617 ath_descdma_cleanup(sc, dd, head); 3618 return error; 3619 } 3620 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 3621 TAILQ_INSERT_TAIL(head, bf, bf_list); 3622 } 3623 return 0; 3624 fail3: 3625 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3626 fail2: 3627 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3628 fail1: 3629 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3630 fail0: 3631 bus_dma_tag_destroy(dd->dd_dmat); 3632 memset(dd, 0, sizeof(*dd)); 3633 return error; 3634 #undef DS2PHYS 3635 #undef ATH_DESC_4KB_BOUND_CHECK 3636 } 3637 3638 static void 3639 ath_descdma_cleanup(struct ath_softc *sc, 3640 struct ath_descdma *dd, ath_bufhead *head) 3641 { 3642 struct ath_buf *bf; 3643 struct ieee80211_node *ni; 3644 3645 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3646 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3647 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 3648 bus_dma_tag_destroy(dd->dd_dmat); 3649 3650 TAILQ_FOREACH(bf, head, bf_list) { 3651 if (bf->bf_m) { 3652 m_freem(bf->bf_m); 3653 bf->bf_m = NULL; 3654 } 3655 if (bf->bf_dmamap != NULL) { 3656 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3657 bf->bf_dmamap = NULL; 3658 } 3659 ni = bf->bf_node; 3660 bf->bf_node = NULL; 3661 if (ni != NULL) { 3662 /* 3663 * Reclaim node reference. 3664 */ 3665 ieee80211_free_node(ni); 3666 } 3667 } 3668 3669 TAILQ_INIT(head); 3670 free(dd->dd_bufptr, M_ATHDEV); 3671 memset(dd, 0, sizeof(*dd)); 3672 } 3673 3674 static int 3675 ath_desc_alloc(struct ath_softc *sc) 3676 { 3677 int error; 3678 3679 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3680 "rx", ath_rxbuf, 1); 3681 if (error != 0) 3682 return error; 3683 3684 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3685 "tx", ath_txbuf, ATH_TXDESC); 3686 if (error != 0) { 3687 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3688 return error; 3689 } 3690 3691 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3692 "beacon", ATH_BCBUF, 1); 3693 if (error != 0) { 3694 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3695 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3696 return error; 3697 } 3698 return 0; 3699 } 3700 3701 static void 3702 ath_desc_free(struct ath_softc *sc) 3703 { 3704 3705 if (sc->sc_bdma.dd_desc_len != 0) 3706 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3707 if (sc->sc_txdma.dd_desc_len != 0) 3708 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3709 if (sc->sc_rxdma.dd_desc_len != 0) 3710 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3711 } 3712 3713 static struct ieee80211_node * 3714 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3715 { 3716 struct ieee80211com *ic = vap->iv_ic; 3717 struct ath_softc *sc = ic->ic_ifp->if_softc; 3718 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3719 struct ath_node *an; 3720 3721 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3722 if (an == NULL) { 3723 /* XXX stat+msg */ 3724 return NULL; 3725 } 3726 ath_rate_node_init(sc, an); 3727 3728 /* Setup the mutex 
- there's no associd yet, so name it after the node pointer */ 3729 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3730 device_get_nameunit(sc->sc_dev), an); 3731 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3732 3733 /* XXX setup ath_tid */ 3734 ath_tx_tid_init(sc, an); 3735 3736 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3737 return &an->an_node; 3738 } 3739 3740 static void 3741 ath_node_cleanup(struct ieee80211_node *ni) 3742 { 3743 struct ieee80211com *ic = ni->ni_ic; 3744 struct ath_softc *sc = ic->ic_ifp->if_softc; 3745 3746 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3747 ath_tx_node_flush(sc, ATH_NODE(ni)); 3748 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3749 sc->sc_node_cleanup(ni); 3750 } 3751 3752 static void 3753 ath_node_free(struct ieee80211_node *ni) 3754 { 3755 struct ieee80211com *ic = ni->ni_ic; 3756 struct ath_softc *sc = ic->ic_ifp->if_softc; 3757 3758 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3759 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3760 sc->sc_node_free(ni); 3761 } 3762 3763 static void 3764 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3765 { 3766 struct ieee80211com *ic = ni->ni_ic; 3767 struct ath_softc *sc = ic->ic_ifp->if_softc; 3768 struct ath_hal *ah = sc->sc_ah; 3769 3770 *rssi = ic->ic_node_getrssi(ni); 3771 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3772 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3773 else 3774 *noise = -95; /* nominally correct */ 3775 } 3776 3777 static int 3778 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 3779 { 3780 struct ath_hal *ah = sc->sc_ah; 3781 int error; 3782 struct mbuf *m; 3783 struct ath_desc *ds; 3784 3785 m = bf->bf_m; 3786 if (m == NULL) { 3787 /* 3788 * NB: by assigning a page to the rx dma buffer we 3789 * implicitly satisfy the Atheros requirement that 3790 * this buffer be cache-line-aligned and sized to be 3791 * a multiple of the cache line size. Not doing this 3792 * causes weird stuff to happen (for the 5210 at least). 3793 */ 3794 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3795 if (m == NULL) { 3796 DPRINTF(sc, ATH_DEBUG_ANY, 3797 "%s: no mbuf/cluster\n", __func__); 3798 sc->sc_stats.ast_rx_nombuf++; 3799 return ENOMEM; 3800 } 3801 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 3802 3803 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3804 bf->bf_dmamap, m, 3805 bf->bf_segs, &bf->bf_nseg, 3806 BUS_DMA_NOWAIT); 3807 if (error != 0) { 3808 DPRINTF(sc, ATH_DEBUG_ANY, 3809 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3810 __func__, error); 3811 sc->sc_stats.ast_rx_busdma++; 3812 m_freem(m); 3813 return error; 3814 } 3815 KASSERT(bf->bf_nseg == 1, 3816 ("multi-segment packet; nseg %u", bf->bf_nseg)); 3817 bf->bf_m = m; 3818 } 3819 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 3820 3821 /* 3822 * Setup descriptors. For receive we always terminate 3823 * the descriptor list with a self-linked entry so we'll 3824 * not get overrun under high load (as can happen with a 3825 * 5212 when ANI processing enables PHY error frames). 3826 * 3827 * To insure the last descriptor is self-linked we create 3828 * each descriptor as self-linked and add it to the end. As 3829 * each additional descriptor is added the previous self-linked 3830 * entry is ``fixed'' naturally. This should be safe even 3831 * if DMA is happening. When processing RX interrupts we 3832 * never remove/process the last, self-linked, entry on the 3833 * descriptor list.
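 * (E.g. when a buffer B is appended behind the self-linked tail A, A's ds_link stops pointing at A itself and points at B, and B becomes the new self-linked tail.)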
This insures the hardware always has 3834 * someplace to write a new frame. 3835 */ 3836 /* 3837 * 11N: we can no longer afford to self link the last descriptor. 3838 * MAC acknowledges BA status as long as it copies frames to host 3839 * buffer (or rx fifo). This can incorrectly acknowledge packets 3840 * to a sender if last desc is self-linked. 3841 */ 3842 ds = bf->bf_desc; 3843 if (sc->sc_rxslink) 3844 ds->ds_link = bf->bf_daddr; /* link to self */ 3845 else 3846 ds->ds_link = 0; /* terminate the list */ 3847 ds->ds_data = bf->bf_segs[0].ds_addr; 3848 ath_hal_setuprxdesc(ah, ds 3849 , m->m_len /* buffer size */ 3850 , 0 3851 ); 3852 3853 if (sc->sc_rxlink != NULL) 3854 *sc->sc_rxlink = bf->bf_daddr; 3855 sc->sc_rxlink = &ds->ds_link; 3856 return 0; 3857 } 3858 3859 /* 3860 * Extend 15-bit time stamp from rx descriptor to 3861 * a full 64-bit TSF using the specified TSF. 3862 */ 3863 static __inline u_int64_t 3864 ath_extend_tsf15(u_int32_t rstamp, u_int64_t tsf) 3865 { 3866 if ((tsf & 0x7fff) < rstamp) 3867 tsf -= 0x8000; 3868 3869 return ((tsf &~ 0x7fff) | rstamp); 3870 } 3871 3872 /* 3873 * Extend 32-bit time stamp from rx descriptor to 3874 * a full 64-bit TSF using the specified TSF. 3875 */ 3876 static __inline u_int64_t 3877 ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf) 3878 { 3879 u_int32_t tsf_low = tsf & 0xffffffff; 3880 u_int64_t tsf64 = (tsf & ~0xffffffffULL) | rstamp; 3881 3882 if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000)) 3883 tsf64 -= 0x100000000ULL; 3884 3885 if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000)) 3886 tsf64 += 0x100000000ULL; 3887 3888 return tsf64; 3889 } 3890 3891 /* 3892 * Extend the TSF from the RX descriptor to a full 64 bit TSF. 3893 * Earlier hardware versions only wrote the low 15 bits of the 3894 * TSF into the RX descriptor; later versions (AR5416 and up) 3895 * include the 32 bit TSF value. 3896 */ 3897 static __inline u_int64_t 3898 ath_extend_tsf(struct ath_softc *sc, u_int32_t rstamp, u_int64_t tsf) 3899 { 3900 if (sc->sc_rxtsf32) 3901 return ath_extend_tsf32(rstamp, tsf); 3902 else 3903 return ath_extend_tsf15(rstamp, tsf); 3904 } 3905 3906 /* 3907 * Intercept management frames to collect beacon rssi data 3908 * and to do ibss merges. 3909 */ 3910 static void 3911 ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 3912 int subtype, int rssi, int nf) 3913 { 3914 struct ieee80211vap *vap = ni->ni_vap; 3915 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 3916 3917 /* 3918 * Call up first so subsequent work can use information 3919 * potentially stored in the node (e.g. for ibss merge). 3920 */ 3921 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf); 3922 switch (subtype) { 3923 case IEEE80211_FC0_SUBTYPE_BEACON: 3924 /* update rssi statistics for use by the hal */ 3925 /* XXX unlocked check against vap->iv_bss? */ 3926 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); 3927 if (sc->sc_syncbeacon && 3928 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { 3929 /* 3930 * Resync beacon timers using the tsf of the beacon 3931 * frame we just received. 3932 */ 3933 ath_beacon_config(sc, vap); 3934 } 3935 /* fall thru... */ 3936 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3937 if (vap->iv_opmode == IEEE80211_M_IBSS && 3938 vap->iv_state == IEEE80211_S_RUN) { 3939 uint32_t rstamp = sc->sc_lastrs->rs_tstamp; 3940 uint64_t tsf = ath_extend_tsf(sc, rstamp, 3941 ath_hal_gettsf64(sc->sc_ah)); 3942 /* 3943 * Handle ibss merge as needed; check the tsf on the 3944 * frame before attempting the merge. 
The 802.11 spec 3945 * says the station should change its bssid to match 3946 * the oldest station with the same ssid, where oldest 3947 * is determined by the tsf. Note that hardware 3948 * reconfiguration happens through callback to 3949 * ath_newstate as the state machine will go from 3950 * RUN -> RUN when this happens. 3951 */ 3952 if (le64toh(ni->ni_tstamp.tsf) >= tsf) { 3953 DPRINTF(sc, ATH_DEBUG_STATE, 3954 "ibss merge, rstamp %u tsf %ju " 3955 "tstamp %ju\n", rstamp, (uintmax_t)tsf, 3956 (uintmax_t)ni->ni_tstamp.tsf); 3957 (void) ieee80211_ibss_merge(ni); 3958 } 3959 } 3960 break; 3961 } 3962 } 3963 3964 /* 3965 * Set the default antenna. 3966 */ 3967 static void 3968 ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3969 { 3970 struct ath_hal *ah = sc->sc_ah; 3971 3972 /* XXX block beacon interrupts */ 3973 ath_hal_setdefantenna(ah, antenna); 3974 if (sc->sc_defant != antenna) 3975 sc->sc_stats.ast_ant_defswitch++; 3976 sc->sc_defant = antenna; 3977 sc->sc_rxotherant = 0; 3978 } 3979 3980 static void 3981 ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 3982 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 3983 { 3984 #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3985 #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3986 #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 3987 #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3988 struct ath_softc *sc = ifp->if_softc; 3989 const HAL_RATE_TABLE *rt; 3990 uint8_t rix; 3991 3992 rt = sc->sc_currates; 3993 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3994 rix = rt->rateCodeToIndex[rs->rs_rate]; 3995 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 3996 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 3997 #ifdef AH_SUPPORT_AR5416 3998 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 3999 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 4000 struct ieee80211com *ic = ifp->if_l2com; 4001 4002 if ((rs->rs_flags & HAL_RX_2040) == 0) 4003 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 4004 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 4005 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 4006 else 4007 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 4008 if (rs->rs_flags & HAL_RX_GI) /* half-gi, i.e. short GI; matches the ast_rx_halfgi accounting */ 4009 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 4010 } 4011 #endif 4012 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 4013 if (rs->rs_status & HAL_RXERR_CRC) 4014 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 4015 /* XXX propagate other error flags from descriptor */ 4016 sc->sc_rx_th.wr_antnoise = nf; 4017 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 4018 sc->sc_rx_th.wr_antenna = rs->rs_antenna; 4019 #undef CHAN_HT 4020 #undef CHAN_HT20 4021 #undef CHAN_HT40U 4022 #undef CHAN_HT40D 4023 } 4024 4025 static void 4026 ath_handle_micerror(struct ieee80211com *ic, 4027 struct ieee80211_frame *wh, int keyix) 4028 { 4029 struct ieee80211_node *ni; 4030 4031 /* XXX recheck MIC to deal w/ chips that lie */ 4032 /* XXX discard MIC errors on !data frames */ 4033 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 4034 if (ni != NULL) { 4035 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 4036 ieee80211_free_node(ni); 4037 } 4038 } 4039 4040 /* 4041 * Only run the RX proc if it's not already running. 4042 * Since this may get run as part of the reset/flush path, 4043 * the task can't clash with an existing, running tasklet.
4044 */ 4045 static void 4046 ath_rx_tasklet(void *arg, int npending) 4047 { 4048 struct ath_softc *sc = arg; 4049 4050 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 4051 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 4052 ATH_PCU_LOCK(sc); 4053 if (sc->sc_inreset_cnt > 0) { 4054 device_printf(sc->sc_dev, 4055 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4056 ATH_PCU_UNLOCK(sc); 4057 return; 4058 } 4059 ATH_PCU_UNLOCK(sc); 4060 ath_rx_proc(sc, 1); 4061 } 4062 4063 static void 4064 ath_rx_proc(struct ath_softc *sc, int resched) 4065 { 4066 #define PA2DESC(_sc, _pa) \ 4067 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 4068 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 4069 struct ath_buf *bf; 4070 struct ifnet *ifp = sc->sc_ifp; 4071 struct ieee80211com *ic = ifp->if_l2com; 4072 struct ath_hal *ah = sc->sc_ah; 4073 struct ath_desc *ds; 4074 struct ath_rx_status *rs; 4075 struct mbuf *m; 4076 struct ieee80211_node *ni; 4077 int len, type, ngood; 4078 HAL_STATUS status; 4079 int16_t nf; 4080 u_int64_t tsf, rstamp; 4081 int npkts = 0; 4082 4083 /* XXX we must not hold the ATH_LOCK here */ 4084 ATH_UNLOCK_ASSERT(sc); 4085 ATH_PCU_UNLOCK_ASSERT(sc); 4086 4087 ATH_PCU_LOCK(sc); 4088 sc->sc_rxproc_cnt++; 4089 ATH_PCU_UNLOCK(sc); 4090 4091 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 4092 ngood = 0; 4093 nf = ath_hal_getchannoise(ah, sc->sc_curchan); 4094 sc->sc_stats.ast_rx_noise = nf; 4095 tsf = ath_hal_gettsf64(ah); 4096 do { 4097 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4098 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 4099 if_printf(ifp, "%s: no buffer!\n", __func__); 4100 break; 4101 } else if (bf == NULL) { 4102 /* 4103 * End of List: 4104 * this can happen for non-self-linked RX chains 4105 */ 4106 sc->sc_stats.ast_rx_hitqueueend++; 4107 break; 4108 } 4109 m = bf->bf_m; 4110 if (m == NULL) { /* NB: shouldn't happen */ 4111 /* 4112 * If mbuf allocation failed previously there 4113 * will be no mbuf; try again to re-populate it. 4114 */ 4115 /* XXX make debug msg */ 4116 if_printf(ifp, "%s: no mbuf!\n", __func__); 4117 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4118 goto rx_next; 4119 } 4120 ds = bf->bf_desc; 4121 if (ds->ds_link == bf->bf_daddr) { 4122 /* NB: never process the self-linked entry at the end */ 4123 sc->sc_stats.ast_rx_hitqueueend++; 4124 break; 4125 } 4126 /* XXX sync descriptor memory */ 4127 /* 4128 * Must provide the virtual address of the current 4129 * descriptor, the physical address, and the virtual 4130 * address of the next descriptor in the h/w chain. 4131 * This allows the HAL to look ahead to see if the 4132 * hardware is done with a descriptor by checking the 4133 * done bit in the following descriptor and the address 4134 * of the current descriptor the DMA engine is working 4135 * on. All this is necessary because of our use of 4136 * a self-linked list to avoid rx overruns. 4137 */ 4138 rs = &bf->bf_status.ds_rxstat; 4139 status = ath_hal_rxprocdesc(ah, ds, 4140 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 4141 #ifdef ATH_DEBUG 4142 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 4143 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4144 #endif 4145 if (status == HAL_EINPROGRESS) 4146 break; 4147 4148 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 4149 npkts++; 4150 4151 /* 4152 * Calculate the correct 64 bit TSF given 4153 * the TSF64 register value and rs_tstamp. 
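 * An illustrative 15-bit case (invented values): with tsf = 0x12340100 and rs_tstamp = 0x7ff0, the TSF's low 15 bits (0x0100) are below the stamp, so the frame predates the latest 15-bit rollover and ath_extend_tsf15() returns 0x1233fff0.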
4154 */ 4155 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 4156 4157 /* These aren't specifically errors */ 4158 #ifdef AH_SUPPORT_AR5416 4159 if (rs->rs_flags & HAL_RX_GI) 4160 sc->sc_stats.ast_rx_halfgi++; 4161 if (rs->rs_flags & HAL_RX_2040) 4162 sc->sc_stats.ast_rx_2040++; 4163 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 4164 sc->sc_stats.ast_rx_pre_crc_err++; 4165 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 4166 sc->sc_stats.ast_rx_post_crc_err++; 4167 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 4168 sc->sc_stats.ast_rx_decrypt_busy_err++; 4169 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 4170 sc->sc_stats.ast_rx_hi_rx_chain++; 4171 #endif /* AH_SUPPORT_AR5416 */ 4172 4173 if (rs->rs_status != 0) { 4174 if (rs->rs_status & HAL_RXERR_CRC) 4175 sc->sc_stats.ast_rx_crcerr++; 4176 if (rs->rs_status & HAL_RXERR_FIFO) 4177 sc->sc_stats.ast_rx_fifoerr++; 4178 if (rs->rs_status & HAL_RXERR_PHY) { 4179 sc->sc_stats.ast_rx_phyerr++; 4180 /* Process DFS radar events */ 4181 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 4182 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 4183 /* Since we're touching the frame data, sync it */ 4184 bus_dmamap_sync(sc->sc_dmat, 4185 bf->bf_dmamap, 4186 BUS_DMASYNC_POSTREAD); 4187 /* Now pass it to the radar processing code */ 4188 ath_dfs_process_phy_err(sc, mtod(m, char *), rstamp, rs); 4189 } 4190 4191 /* Be suitably paranoid about phy error codes that would index past the stats array bounds */ 4192 if (rs->rs_phyerr < 64) 4193 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 4194 goto rx_error; /* NB: don't count in ierrors */ 4195 } 4196 if (rs->rs_status & HAL_RXERR_DECRYPT) { 4197 /* 4198 * Decrypt error. If the error occurred 4199 * because there was no hardware key, then 4200 * let the frame through so the upper layers 4201 * can process it. This is necessary for 5210 4202 * parts which have no way to setup a ``clear'' 4203 * key cache entry. 4204 * 4205 * XXX do key cache faulting 4206 */ 4207 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 4208 goto rx_accept; 4209 sc->sc_stats.ast_rx_badcrypt++; 4210 } 4211 if (rs->rs_status & HAL_RXERR_MIC) { 4212 sc->sc_stats.ast_rx_badmic++; 4213 /* 4214 * Do minimal work required to hand off 4215 * the 802.11 header for notification. 4216 */ 4217 /* XXX frags and QoS frames */ 4218 len = rs->rs_datalen; 4219 if (len >= sizeof (struct ieee80211_frame)) { 4220 bus_dmamap_sync(sc->sc_dmat, 4221 bf->bf_dmamap, 4222 BUS_DMASYNC_POSTREAD); 4223 ath_handle_micerror(ic, 4224 mtod(m, struct ieee80211_frame *), 4225 sc->sc_splitmic ? 4226 rs->rs_keyix-32 : rs->rs_keyix); 4227 } 4228 } 4229 ifp->if_ierrors++; 4230 rx_error: 4231 /* 4232 * Cleanup any pending partial frame. 4233 */ 4234 if (sc->sc_rxpending != NULL) { 4235 m_freem(sc->sc_rxpending); 4236 sc->sc_rxpending = NULL; 4237 } 4238 /* 4239 * When a tap is present pass error frames 4240 * that have been requested. By default we 4241 * pass decrypt+mic errors but others may be 4242 * interesting (e.g. crc). 4243 */ 4244 if (ieee80211_radiotap_active(ic) && 4245 (rs->rs_status & sc->sc_monpass)) { 4246 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4247 BUS_DMASYNC_POSTREAD); 4248 /* NB: bpf needs the mbuf length setup */ 4249 len = rs->rs_datalen; 4250 m->m_pkthdr.len = m->m_len = len; 4251 bf->bf_m = NULL; 4252 ath_rx_tap(ifp, m, rs, rstamp, nf); 4253 ieee80211_radiotap_rx_all(ic, m); 4254 m_freem(m); 4255 } 4256 /* XXX pass MIC errors up for s/w recalculation */ 4257 goto rx_next; 4258 } 4259 rx_accept: 4260 /* 4261 * Sync and unmap the frame.
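 * (The POSTREAD sync below is what makes the DMA-written payload visible to the CPU before anything inspects it.)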
At this point we're 4262 * committed to passing the mbuf somewhere so clear 4263 * bf_m; this means a new mbuf must be allocated 4264 * when the rx descriptor is setup again to receive 4265 * another frame. 4266 */ 4267 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4268 BUS_DMASYNC_POSTREAD); 4269 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4270 bf->bf_m = NULL; 4271 4272 len = rs->rs_datalen; 4273 m->m_len = len; 4274 4275 if (rs->rs_more) { 4276 /* 4277 * Frame spans multiple descriptors; save 4278 * it for the next completed descriptor, it 4279 * will be used to construct a jumbogram. 4280 */ 4281 if (sc->sc_rxpending != NULL) { 4282 /* NB: max frame size is currently 2 clusters */ 4283 sc->sc_stats.ast_rx_toobig++; 4284 m_freem(sc->sc_rxpending); 4285 } 4286 m->m_pkthdr.rcvif = ifp; 4287 m->m_pkthdr.len = len; 4288 sc->sc_rxpending = m; 4289 goto rx_next; 4290 } else if (sc->sc_rxpending != NULL) { 4291 /* 4292 * This is the second part of a jumbogram, 4293 * chain it to the first mbuf, adjust the 4294 * frame length, and clear the rxpending state. 4295 */ 4296 sc->sc_rxpending->m_next = m; 4297 sc->sc_rxpending->m_pkthdr.len += len; 4298 m = sc->sc_rxpending; 4299 sc->sc_rxpending = NULL; 4300 } else { 4301 /* 4302 * Normal single-descriptor receive; setup 4303 * the rcvif and packet length. 4304 */ 4305 m->m_pkthdr.rcvif = ifp; 4306 m->m_pkthdr.len = len; 4307 } 4308 4309 /* 4310 * Validate rs->rs_antenna. 4311 * 4312 * Some users w/ AR9285 NICs have reported crashes 4313 * here because the rs_antenna field is bogusly large. 4314 * Let's enforce the maximum antenna limit of 8 4315 * (and it shouldn't be hard coded, but that's a 4316 * separate problem) and if there's an issue, print 4317 * out an error and adjust rs_antenna to something 4318 * sensible. 4319 * 4320 * This code should be removed once the actual 4321 * root cause of the issue has been identified. 4322 * For example, it may be that the rs_antenna 4323 * field is only valid for the last frame of 4324 * an aggregate and it just happens that it is 4325 * "mostly" right. (This is a general statement - 4326 * the majority of the statistics are only valid 4327 * for the last frame in an aggregate.) 4328 */ 4329 if (rs->rs_antenna > 7) { 4330 device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", 4331 __func__, rs->rs_antenna); 4332 #ifdef ATH_DEBUG 4333 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 4334 #endif /* ATH_DEBUG */ 4335 rs->rs_antenna = 0; /* XXX better than nothing */ 4336 } 4337 4338 ifp->if_ipackets++; 4339 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 4340 4341 /* 4342 * Populate the rx status block. When there are bpf 4343 * listeners we do the additional work to provide 4344 * complete status. Otherwise we fill in only the 4345 * material required by ieee80211_input. Note that 4346 * the noise setting is filled in above. 4347 */ 4348 if (ieee80211_radiotap_active(ic)) 4349 ath_rx_tap(ifp, m, rs, rstamp, nf); 4350 4351 /* 4352 * From this point on we assume the frame is at least 4353 * as large as ieee80211_frame_min; verify that.
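 * (IEEE80211_MIN_LEN is, roughly, sizeof(struct ieee80211_frame_min) plus the FCS; anything shorter cannot be a meaningful 802.11 frame.)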
4354 */ 4355 if (len < IEEE80211_MIN_LEN) { 4356 if (!ieee80211_radiotap_active(ic)) { 4357 DPRINTF(sc, ATH_DEBUG_RECV, 4358 "%s: short packet %d\n", __func__, len); 4359 sc->sc_stats.ast_rx_tooshort++; 4360 } else { 4361 /* NB: in particular this captures ack's */ 4362 ieee80211_radiotap_rx_all(ic, m); 4363 } 4364 m_freem(m); 4365 goto rx_next; 4366 } 4367 4368 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 4369 const HAL_RATE_TABLE *rt = sc->sc_currates; 4370 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 4371 4372 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 4373 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 4374 } 4375 4376 m_adj(m, -IEEE80211_CRC_LEN); 4377 4378 /* 4379 * Locate the node for sender, track state, and then 4380 * pass the (referenced) node up to the 802.11 layer 4381 * for its use. 4382 */ 4383 ni = ieee80211_find_rxnode_withkey(ic, 4384 mtod(m, const struct ieee80211_frame_min *), 4385 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 4386 IEEE80211_KEYIX_NONE : rs->rs_keyix); 4387 sc->sc_lastrs = rs; 4388 4389 #ifdef AH_SUPPORT_AR5416 4390 if (rs->rs_isaggr) 4391 sc->sc_stats.ast_rx_agg++; 4392 #endif /* AH_SUPPORT_AR5416 */ 4393 4394 if (ni != NULL) { 4395 /* 4396 * Only punt packets for ampdu reorder processing for 4397 * 11n nodes; net80211 enforces that M_AMPDU is only 4398 * set for 11n nodes. 4399 */ 4400 if (ni->ni_flags & IEEE80211_NODE_HT) 4401 m->m_flags |= M_AMPDU; 4402 4403 /* 4404 * Sending station is known, dispatch directly. 4405 */ 4406 type = ieee80211_input(ni, m, rs->rs_rssi, nf); 4407 ieee80211_free_node(ni); 4408 /* 4409 * Arrange to update the last rx timestamp only for 4410 * frames from our ap when operating in station mode. 4411 * This assumes the rx key is always setup when 4412 * associated. 4413 */ 4414 if (ic->ic_opmode == IEEE80211_M_STA && 4415 rs->rs_keyix != HAL_RXKEYIX_INVALID) 4416 ngood++; 4417 } else { 4418 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 4419 } 4420 /* 4421 * Track rx rssi and do any rx antenna management. 4422 */ 4423 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 4424 if (sc->sc_diversity) { 4425 /* 4426 * When using fast diversity, change the default rx 4427 * antenna if diversity chooses the other antenna 3 4428 * times in a row. 4429 */ 4430 if (sc->sc_defant != rs->rs_antenna) { 4431 if (++sc->sc_rxotherant >= 3) 4432 ath_setdefantenna(sc, rs->rs_antenna); 4433 } else 4434 sc->sc_rxotherant = 0; 4435 } 4436 4437 /* Newer school diversity - kite specific for now */ 4438 /* XXX perhaps migrate the normal diversity code to this? */ 4439 if ((ah)->ah_rxAntCombDiversity) 4440 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 4441 4442 if (sc->sc_softled) { 4443 /* 4444 * Blink for any data frame. Otherwise do a 4445 * heartbeat-style blink when idle. The latter 4446 * is mainly for station mode where we depend on 4447 * periodic beacon frames to trigger the poll event. 
4448 */ 4449 if (type == IEEE80211_FC0_TYPE_DATA) { 4450 const HAL_RATE_TABLE *rt = sc->sc_currates; 4451 ath_led_event(sc, 4452 rt->rateCodeToIndex[rs->rs_rate]); 4453 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 4454 ath_led_event(sc, 0); 4455 } 4456 rx_next: 4457 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 4458 } while (ath_rxbuf_init(sc, bf) == 0); 4459 4460 /* rx signal state monitoring */ 4461 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 4462 if (ngood) 4463 sc->sc_lastrx = tsf; 4464 4465 CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); 4466 /* Queue DFS tasklet if needed */ 4467 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 4468 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 4469 4470 /* 4471 * Now that all the RX frames that needed handling 4472 * have been handled, kick the PCU if there's 4473 * been an RXEOL condition. 4474 */ 4475 ATH_PCU_LOCK(sc); 4476 if (resched && sc->sc_kickpcu) { 4477 CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu"); 4478 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 4479 __func__, npkts); 4480 4481 /* XXX rxslink? */ 4482 /* 4483 * XXX can we hold the PCU lock here? 4484 * Are there any net80211 buffer calls involved? 4485 */ 4486 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4487 ath_hal_putrxbuf(ah, bf->bf_daddr); 4488 ath_hal_rxena(ah); /* enable recv descriptors */ 4489 ath_mode_init(sc); /* set filters, etc. */ 4490 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 4491 4492 ath_hal_intrset(ah, sc->sc_imask); 4493 sc->sc_kickpcu = 0; 4494 } 4495 ATH_PCU_UNLOCK(sc); 4496 4497 /* XXX check this inside of IF_LOCK? */ 4498 if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 4499 #ifdef IEEE80211_SUPPORT_SUPERG 4500 ieee80211_ff_age_all(ic, 100); 4501 #endif 4502 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 4503 ath_start(ifp); 4504 } 4505 #undef PA2DESC 4506 4507 ATH_PCU_LOCK(sc); 4508 sc->sc_rxproc_cnt--; 4509 ATH_PCU_UNLOCK(sc); 4510 } 4511 4512 static void 4513 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4514 { 4515 txq->axq_qnum = qnum; 4516 txq->axq_ac = 0; 4517 txq->axq_depth = 0; 4518 txq->axq_aggr_depth = 0; 4519 txq->axq_intrcnt = 0; 4520 txq->axq_link = NULL; 4521 txq->axq_softc = sc; 4522 TAILQ_INIT(&txq->axq_q); 4523 TAILQ_INIT(&txq->axq_tidq); 4524 ATH_TXQ_LOCK_INIT(sc, txq); 4525 } 4526 4527 /* 4528 * Setup a h/w transmit queue. 4529 */ 4530 static struct ath_txq * 4531 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4532 { 4533 #define N(a) (sizeof(a)/sizeof(a[0])) 4534 struct ath_hal *ah = sc->sc_ah; 4535 HAL_TXQ_INFO qi; 4536 int qnum; 4537 4538 memset(&qi, 0, sizeof(qi)); 4539 qi.tqi_subtype = subtype; 4540 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4541 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4542 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4543 /* 4544 * Enable interrupts only for EOL and DESC conditions. 4545 * We mark tx descriptors to receive a DESC interrupt 4546 * when a tx queue gets deep; otherwise we wait for the 4547 * EOL to reap descriptors. Note that this is done to 4548 * reduce interrupt load; it only defers reaping 4549 * descriptors, never transmitting frames. Aside from 4550 * reducing interrupts this also permits more concurrency. 4551 * The only potential downside is if the tx queue backs 4552 * up, in which case the top half of the kernel may back up 4553 * due to a lack of tx descriptors.
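 * A sketch of the trade-off: with EOL-driven reaping a queue of N frames can complete on a single interrupt rather than N, at the cost of holding finished descriptors (and their mbufs) until the EOL fires or the queue depth forces a DESC interrupt.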
4554 */ 4555 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 4556 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4557 if (qnum == -1) { 4558 /* 4559 * NB: don't print a message, this happens 4560 * normally on parts with too few tx queues 4561 */ 4562 return NULL; 4563 } 4564 if (qnum >= N(sc->sc_txq)) { 4565 device_printf(sc->sc_dev, 4566 "hal qnum %u out of range, max %zu!\n", 4567 qnum, N(sc->sc_txq)); 4568 ath_hal_releasetxqueue(ah, qnum); 4569 return NULL; 4570 } 4571 if (!ATH_TXQ_SETUP(sc, qnum)) { 4572 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4573 sc->sc_txqsetup |= 1<<qnum; 4574 } 4575 return &sc->sc_txq[qnum]; 4576 #undef N 4577 } 4578 4579 /* 4580 * Setup a hardware data transmit queue for the specified 4581 * access control. The hal may not support all requested 4582 * queues in which case it will return a reference to a 4583 * previously setup queue. We record the mapping from ac's 4584 * to h/w queues for use by ath_tx_start and also track 4585 * the set of h/w queues being used to optimize work in the 4586 * transmit interrupt handler and related routines. 4587 */ 4588 static int 4589 ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4590 { 4591 #define N(a) (sizeof(a)/sizeof(a[0])) 4592 struct ath_txq *txq; 4593 4594 if (ac >= N(sc->sc_ac2q)) { 4595 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4596 ac, N(sc->sc_ac2q)); 4597 return 0; 4598 } 4599 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4600 if (txq != NULL) { 4601 txq->axq_ac = ac; 4602 sc->sc_ac2q[ac] = txq; 4603 return 1; 4604 } else 4605 return 0; 4606 #undef N 4607 } 4608 4609 /* 4610 * Update WME parameters for a transmit queue. 4611 */ 4612 static int 4613 ath_txq_update(struct ath_softc *sc, int ac) 4614 { 4615 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4616 #define ATH_TXOP_TO_US(v) (v<<5) 4617 struct ifnet *ifp = sc->sc_ifp; 4618 struct ieee80211com *ic = ifp->if_l2com; 4619 struct ath_txq *txq = sc->sc_ac2q[ac]; 4620 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4621 struct ath_hal *ah = sc->sc_ah; 4622 HAL_TXQ_INFO qi; 4623 4624 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4625 #ifdef IEEE80211_SUPPORT_TDMA 4626 if (sc->sc_tdma) { 4627 /* 4628 * AIFS is zero so there's no pre-transmit wait. The 4629 * burst time defines the slot duration and is configured 4630 * through net80211. The QCU is setup to not do post-xmit 4631 * back off, lockout all lower-priority QCU's, and fire 4632 * off the DMA beacon alert timer which is setup based 4633 * on the slot configuration. 4634 */ 4635 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4636 | HAL_TXQ_TXERRINT_ENABLE 4637 | HAL_TXQ_TXURNINT_ENABLE 4638 | HAL_TXQ_TXEOLINT_ENABLE 4639 | HAL_TXQ_DBA_GATED 4640 | HAL_TXQ_BACKOFF_DISABLE 4641 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 4642 ; 4643 qi.tqi_aifs = 0; 4644 /* XXX +dbaprep? */ 4645 qi.tqi_readyTime = sc->sc_tdmaslotlen; 4646 qi.tqi_burstTime = qi.tqi_readyTime; 4647 } else { 4648 #endif 4649 /* 4650 * XXX shouldn't this just use the default flags 4651 * used in the previous queue setup? 
4652 */ 4653 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4654 | HAL_TXQ_TXERRINT_ENABLE 4655 | HAL_TXQ_TXDESCINT_ENABLE 4656 | HAL_TXQ_TXURNINT_ENABLE 4657 | HAL_TXQ_TXEOLINT_ENABLE 4658 ; 4659 qi.tqi_aifs = wmep->wmep_aifsn; 4660 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4661 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 4662 qi.tqi_readyTime = 0; 4663 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4664 #ifdef IEEE80211_SUPPORT_TDMA 4665 } 4666 #endif 4667 4668 DPRINTF(sc, ATH_DEBUG_RESET, 4669 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 4670 __func__, txq->axq_qnum, qi.tqi_qflags, 4671 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4672 4673 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4674 if_printf(ifp, "unable to update hardware queue " 4675 "parameters for %s traffic!\n", 4676 ieee80211_wme_acnames[ac]); 4677 return 0; 4678 } else { 4679 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4680 return 1; 4681 } 4682 #undef ATH_TXOP_TO_US 4683 #undef ATH_EXPONENT_TO_VALUE 4684 } 4685 4686 /* 4687 * Callback from the 802.11 layer to update WME parameters. 4688 */ 4689 static int 4690 ath_wme_update(struct ieee80211com *ic) 4691 { 4692 struct ath_softc *sc = ic->ic_ifp->if_softc; 4693 4694 return !ath_txq_update(sc, WME_AC_BE) || 4695 !ath_txq_update(sc, WME_AC_BK) || 4696 !ath_txq_update(sc, WME_AC_VI) || 4697 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4698 } 4699 4700 /* 4701 * Reclaim resources for a setup queue. 4702 */ 4703 static void 4704 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4705 { 4706 4707 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4708 ATH_TXQ_LOCK_DESTROY(txq); 4709 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4710 } 4711 4712 /* 4713 * Reclaim all tx queue resources. 4714 */ 4715 static void 4716 ath_tx_cleanup(struct ath_softc *sc) 4717 { 4718 int i; 4719 4720 ATH_TXBUF_LOCK_DESTROY(sc); 4721 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4722 if (ATH_TXQ_SETUP(sc, i)) 4723 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4724 } 4725 4726 /* 4727 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4728 * using the current rates in sc_rixmap. 4729 */ 4730 int 4731 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 4732 { 4733 int rix = sc->sc_rixmap[rate]; 4734 /* NB: return lowest rix for invalid rate */ 4735 return (rix == 0xff ? 
0 : rix); 4736 } 4737 4738 static void 4739 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 4740 struct ath_buf *bf) 4741 { 4742 struct ieee80211_node *ni = bf->bf_node; 4743 struct ifnet *ifp = sc->sc_ifp; 4744 struct ieee80211com *ic = ifp->if_l2com; 4745 int sr, lr, pri; 4746 4747 if (ts->ts_status == 0) { 4748 u_int8_t txant = ts->ts_antenna; 4749 sc->sc_stats.ast_ant_tx[txant]++; 4750 sc->sc_ant_tx[txant]++; 4751 if (ts->ts_finaltsi != 0) 4752 sc->sc_stats.ast_tx_altrate++; 4753 pri = M_WME_GETAC(bf->bf_m); 4754 if (pri >= WME_AC_VO) 4755 ic->ic_wme.wme_hipri_traffic++; 4756 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 4757 ni->ni_inact = ni->ni_inact_reload; 4758 } else { 4759 if (ts->ts_status & HAL_TXERR_XRETRY) 4760 sc->sc_stats.ast_tx_xretries++; 4761 if (ts->ts_status & HAL_TXERR_FIFO) 4762 sc->sc_stats.ast_tx_fifoerr++; 4763 if (ts->ts_status & HAL_TXERR_FILT) 4764 sc->sc_stats.ast_tx_filtered++; 4765 if (ts->ts_status & HAL_TXERR_XTXOP) 4766 sc->sc_stats.ast_tx_xtxop++; 4767 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 4768 sc->sc_stats.ast_tx_timerexpired++; 4769 4770 if (ts->ts_status & HAL_TX_DATA_UNDERRUN) 4771 sc->sc_stats.ast_tx_data_underrun++; 4772 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN) 4773 sc->sc_stats.ast_tx_delim_underrun++; 4774 4775 if (bf->bf_m->m_flags & M_FF) 4776 sc->sc_stats.ast_ff_txerr++; 4777 } 4778 /* XXX when is this valid? */ 4779 if (ts->ts_status & HAL_TX_DESC_CFG_ERR) 4780 sc->sc_stats.ast_tx_desccfgerr++; 4781 4782 sr = ts->ts_shortretry; 4783 lr = ts->ts_longretry; 4784 sc->sc_stats.ast_tx_shortretry += sr; 4785 sc->sc_stats.ast_tx_longretry += lr; 4786 4787 } 4788 4789 /* 4790 * The default completion. If fail is 1, this means 4791 * "please don't retry the frame, and just return -1 status 4792 * to the net80211 stack". 4793 */ 4794 void 4795 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4796 { 4797 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4798 int st; 4799 4800 if (fail == 1) 4801 st = -1; 4802 else 4803 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 4804 ts->ts_status : HAL_TXERR_XRETRY; 4805 4806 if (bf->bf_state.bfs_dobaw) 4807 device_printf(sc->sc_dev, 4808 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 4809 __func__, 4810 bf, 4811 SEQNO(bf->bf_state.bfs_seqno)); 4812 if (bf->bf_next != NULL) 4813 device_printf(sc->sc_dev, 4814 "%s: bf %p: seqno %d: bf_next not NULL!\n", 4815 __func__, 4816 bf, 4817 SEQNO(bf->bf_state.bfs_seqno)); 4818 4819 /* 4820 * Do any tx complete callback. Note this must 4821 * be done before releasing the node reference. 4822 * This will free the mbuf, release the net80211 4823 * node and recycle the ath_buf. 4824 */ 4825 ath_tx_freebuf(sc, bf, st); 4826 } 4827 4828 /* 4829 * Update rate control with the given completion status. 4830 */ 4831 void 4832 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4833 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4834 int nframes, int nbad) 4835 { 4836 struct ath_node *an; 4837 4838 /* Only for unicast frames */ 4839 if (ni == NULL) 4840 return; 4841 4842 an = ATH_NODE(ni); 4843 4844 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4845 ATH_NODE_LOCK(an); 4846 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4847 ATH_NODE_UNLOCK(an); 4848 } 4849 } 4850 4851 /* 4852 * Update the busy status of the last frame on the free list.
4853 * When doing TDMA, the busy flag tracks whether the hardware 4854 * currently points to this buffer or not, and thus gated DMA 4855 * may restart by re-reading the last descriptor in this 4856 * buffer. 4857 * 4858 * This should be called in the completion function once one 4859 * of the buffers has been used. 4860 */ 4861 static void 4862 ath_tx_update_busy(struct ath_softc *sc) 4863 { 4864 struct ath_buf *last; 4865 4866 /* 4867 * Since the last frame may still be marked 4868 * as ATH_BUF_BUSY, unmark it here before 4869 * finishing the frame processing. 4870 * Since we've completed a frame (aggregate 4871 * or otherwise), the hardware has moved on 4872 * and is no longer referencing the previous 4873 * descriptor. 4874 */ 4875 ATH_TXBUF_LOCK_ASSERT(sc); 4876 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 4877 if (last != NULL) 4878 last->bf_flags &= ~ATH_BUF_BUSY; 4879 } 4880 4881 4882 /* 4883 * Process completed xmit descriptors from the specified queue. 4884 * Kick the packet scheduler if needed. This can occur from this 4885 * particular task. 4886 */ 4887 static int 4888 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4889 { 4890 struct ath_hal *ah = sc->sc_ah; 4891 struct ath_buf *bf; 4892 struct ath_desc *ds; 4893 struct ath_tx_status *ts; 4894 struct ieee80211_node *ni; 4895 struct ath_node *an; #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = sc->sc_ifp->if_l2com; /* needed by ieee80211_ff_flush() below */ #endif 4896 int nacked; 4897 HAL_STATUS status; 4898 4899 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4900 __func__, txq->axq_qnum, 4901 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4902 txq->axq_link); 4903 nacked = 0; 4904 for (;;) { 4905 ATH_TXQ_LOCK(txq); 4906 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4907 bf = TAILQ_FIRST(&txq->axq_q); 4908 if (bf == NULL) { 4909 ATH_TXQ_UNLOCK(txq); 4910 break; 4911 } 4912 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4913 ts = &bf->bf_status.ds_txstat; 4914 status = ath_hal_txprocdesc(ah, ds, ts); 4915 #ifdef ATH_DEBUG 4916 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4917 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4918 status == HAL_OK); 4919 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 4920 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4921 status == HAL_OK); 4922 } 4923 #endif 4924 if (status == HAL_EINPROGRESS) { 4925 ATH_TXQ_UNLOCK(txq); 4926 break; 4927 } 4928 ATH_TXQ_REMOVE(txq, bf, bf_list); 4929 #ifdef IEEE80211_SUPPORT_TDMA 4930 if (txq->axq_depth > 0) { 4931 /* 4932 * More frames follow. Mark the buffer busy 4933 * so it's not re-used while the hardware may 4934 * still re-read the link field in the descriptor. 4935 * 4936 * Use the last buffer in an aggregate as that 4937 * is where the hardware may be - intermediate 4938 * descriptors won't be "busy". 4939 */ 4940 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4941 } else 4942 #else 4943 if (txq->axq_depth == 0) 4944 #endif 4945 txq->axq_link = NULL; 4946 if (bf->bf_state.bfs_aggr) 4947 txq->axq_aggr_depth--; 4948 4949 ni = bf->bf_node; 4950 /* 4951 * If unicast frame was ack'd update RSSI, 4952 * including the last rx time used to 4953 * work around phantom bmiss interrupts.
4954 */ 4955 if (ni != NULL && ts->ts_status == 0 && 4956 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4957 nacked++; 4958 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4959 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4960 ts->ts_rssi); 4961 } 4962 ATH_TXQ_UNLOCK(txq); 4963 4964 /* If unicast frame, update general statistics */ 4965 if (ni != NULL) { 4966 an = ATH_NODE(ni); 4967 /* update statistics */ 4968 ath_tx_update_stats(sc, ts, bf); 4969 } 4970 4971 /* 4972 * Call the completion handler. 4973 * The completion handler is responsible for 4974 * calling the rate control code. 4975 * 4976 * Frames with no completion handler get the 4977 * rate control code called here. 4978 */ 4979 if (bf->bf_comp == NULL) { 4980 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4981 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4982 /* 4983 * XXX assume this isn't an aggregate 4984 * frame. 4985 */ 4986 ath_tx_update_ratectrl(sc, ni, 4987 bf->bf_state.bfs_rc, ts, 4988 bf->bf_state.bfs_pktlen, 1, 4989 (ts->ts_status == 0 ? 0 : 1)); 4990 } 4991 ath_tx_default_comp(sc, bf, 0); 4992 } else 4993 bf->bf_comp(sc, bf, 0); 4994 } 4995 #ifdef IEEE80211_SUPPORT_SUPERG 4996 /* 4997 * Flush fast-frame staging queue when traffic slows. 4998 */ 4999 if (txq->axq_depth <= 1) 5000 ieee80211_ff_flush(ic, txq->axq_ac); 5001 #endif 5002 5003 /* Kick the TXQ scheduler */ 5004 if (dosched) { 5005 ATH_TXQ_LOCK(txq); 5006 ath_txq_sched(sc, txq); 5007 ATH_TXQ_UNLOCK(txq); 5008 } 5009 5010 return nacked; 5011 } 5012 5013 #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 5014 5015 /* 5016 * Deferred processing of transmit interrupt; special-cased 5017 * for a single hardware transmit queue (e.g. 5210 and 5211). 5018 */ 5019 static void 5020 ath_tx_proc_q0(void *arg, int npending) 5021 { 5022 struct ath_softc *sc = arg; 5023 struct ifnet *ifp = sc->sc_ifp; 5024 uint32_t txqs; 5025 5026 ATH_PCU_LOCK(sc); 5027 sc->sc_txproc_cnt++; 5028 txqs = sc->sc_txq_active; 5029 sc->sc_txq_active &= ~txqs; 5030 ATH_PCU_UNLOCK(sc); 5031 5032 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 5033 /* XXX why is lastrx updated in tx code? */ 5034 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5035 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 5036 ath_tx_processq(sc, sc->sc_cabq, 1); 5037 IF_LOCK(&ifp->if_snd); 5038 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5039 IF_UNLOCK(&ifp->if_snd); 5040 sc->sc_wd_timer = 0; 5041 5042 if (sc->sc_softled) 5043 ath_led_event(sc, sc->sc_txrix); 5044 5045 ATH_PCU_LOCK(sc); 5046 sc->sc_txproc_cnt--; 5047 ATH_PCU_UNLOCK(sc); 5048 5049 ath_start(ifp); 5050 } 5051 5052 /* 5053 * Deferred processing of transmit interrupt; special-cased 5054 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 5055 */ 5056 static void 5057 ath_tx_proc_q0123(void *arg, int npending) 5058 { 5059 struct ath_softc *sc = arg; 5060 struct ifnet *ifp = sc->sc_ifp; 5061 int nacked; 5062 uint32_t txqs; 5063 5064 ATH_PCU_LOCK(sc); 5065 sc->sc_txproc_cnt++; 5066 txqs = sc->sc_txq_active; 5067 sc->sc_txq_active &= ~txqs; 5068 ATH_PCU_UNLOCK(sc); 5069 5070 /* 5071 * Process each active queue. 
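 * (Only queues 0-3 plus the cab queue are polled here, matching the four-queue configuration this path is special-cased for; see the function comment above.)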
5072 */ 5073 nacked = 0; 5074 if (TXQACTIVE(txqs, 0)) 5075 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 5076 if (TXQACTIVE(txqs, 1)) 5077 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 5078 if (TXQACTIVE(txqs, 2)) 5079 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 5080 if (TXQACTIVE(txqs, 3)) 5081 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 5082 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 5083 ath_tx_processq(sc, sc->sc_cabq, 1); 5084 if (nacked) 5085 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5086 5087 IF_LOCK(&ifp->if_snd); 5088 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5089 IF_UNLOCK(&ifp->if_snd); 5090 sc->sc_wd_timer = 0; 5091 5092 if (sc->sc_softled) 5093 ath_led_event(sc, sc->sc_txrix); 5094 5095 ATH_PCU_LOCK(sc); 5096 sc->sc_txproc_cnt--; 5097 ATH_PCU_UNLOCK(sc); 5098 5099 ath_start(ifp); 5100 } 5101 5102 /* 5103 * Deferred processing of transmit interrupt. 5104 */ 5105 static void 5106 ath_tx_proc(void *arg, int npending) 5107 { 5108 struct ath_softc *sc = arg; 5109 struct ifnet *ifp = sc->sc_ifp; 5110 int i, nacked; 5111 uint32_t txqs; 5112 5113 ATH_PCU_LOCK(sc); 5114 sc->sc_txproc_cnt++; 5115 txqs = sc->sc_txq_active; 5116 sc->sc_txq_active &= ~txqs; 5117 ATH_PCU_UNLOCK(sc); 5118 5119 /* 5120 * Process each active queue. 5121 */ 5122 nacked = 0; 5123 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5124 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 5125 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 5126 if (nacked) 5127 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 5128 5129 /* XXX check this inside of IF_LOCK? */ 5130 IF_LOCK(&ifp->if_snd); 5131 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5132 IF_UNLOCK(&ifp->if_snd); 5133 sc->sc_wd_timer = 0; 5134 5135 if (sc->sc_softled) 5136 ath_led_event(sc, sc->sc_txrix); 5137 5138 ATH_PCU_LOCK(sc); 5139 sc->sc_txproc_cnt--; 5140 ATH_PCU_UNLOCK(sc); 5141 5142 ath_start(ifp); 5143 } 5144 #undef TXQACTIVE 5145 5146 /* 5147 * Deferred processing of TXQ rescheduling. 5148 */ 5149 static void 5150 ath_txq_sched_tasklet(void *arg, int npending) 5151 { 5152 struct ath_softc *sc = arg; 5153 int i; 5154 5155 /* XXX is skipping ok? */ 5156 ATH_PCU_LOCK(sc); 5157 #if 0 5158 if (sc->sc_inreset_cnt > 0) { 5159 device_printf(sc->sc_dev, 5160 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 5161 ATH_PCU_UNLOCK(sc); 5162 return; 5163 } 5164 #endif 5165 sc->sc_txproc_cnt++; 5166 ATH_PCU_UNLOCK(sc); 5167 5168 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5169 if (ATH_TXQ_SETUP(sc, i)) { 5170 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5171 ath_txq_sched(sc, &sc->sc_txq[i]); 5172 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5173 } 5174 } 5175 5176 ATH_PCU_LOCK(sc); 5177 sc->sc_txproc_cnt--; 5178 ATH_PCU_UNLOCK(sc); 5179 } 5180 5181 /* 5182 * Return a buffer to the pool and update the 'busy' flag on the 5183 * previous 'tail' entry. 5184 * 5185 * This _must_ only be called when the buffer is involved in a completed 5186 * TX. The logic is that if it was part of an active TX, the previous 5187 * buffer on the list is now not involved in a halted TX DMA queue, waiting 5188 * for restart (eg for TDMA.) 5189 * 5190 * The caller must free the mbuf and recycle the node reference. 
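 * (ath_tx_freebuf() below is the usual wrapper: it detaches bf_m/bf_node, calls here, then runs any M_TXCB completion callback, reclaims the node reference and frees the mbuf.)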
5191 */ 5192 void 5193 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 5194 { 5195 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 5196 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 5197 5198 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 5199 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 5200 5201 ATH_TXBUF_LOCK(sc); 5202 ath_tx_update_busy(sc); 5203 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 5204 ATH_TXBUF_UNLOCK(sc); 5205 } 5206 5207 /* 5208 * This is currently used by ath_tx_draintxq() and 5209 * ath_tx_tid_free_pkts(). 5210 * 5211 * It recycles a single ath_buf. 5212 */ 5213 void 5214 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 5215 { 5216 struct ieee80211_node *ni = bf->bf_node; 5217 struct mbuf *m0 = bf->bf_m; 5218 5219 bf->bf_node = NULL; 5220 bf->bf_m = NULL; 5221 5222 /* Free the buffer, it's not needed any longer */ 5223 ath_freebuf(sc, bf); 5224 5225 if (ni != NULL) { 5226 /* 5227 * Do any callback and reclaim the node reference. 5228 */ 5229 if (m0->m_flags & M_TXCB) 5230 ieee80211_process_callback(ni, m0, status); 5231 ieee80211_free_node(ni); 5232 } 5233 m_freem(m0); 5234 5235 /* 5236 * XXX the buffer used to be freed -after-, but the DMA map was 5237 * freed where ath_freebuf() now is. I've no idea what this 5238 * will do. 5239 */ 5240 } 5241 5242 void 5243 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 5244 { 5245 #ifdef ATH_DEBUG 5246 struct ath_hal *ah = sc->sc_ah; 5247 #endif 5248 struct ath_buf *bf; 5249 u_int ix; 5250 5251 /* 5252 * NB: this assumes output has been stopped and 5253 * we do not need to block ath_tx_proc 5254 */ 5255 ATH_TXBUF_LOCK(sc); 5256 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 5257 if (bf != NULL) 5258 bf->bf_flags &= ~ATH_BUF_BUSY; 5259 ATH_TXBUF_UNLOCK(sc); 5260 5261 for (ix = 0;; ix++) { 5262 ATH_TXQ_LOCK(txq); 5263 bf = TAILQ_FIRST(&txq->axq_q); 5264 if (bf == NULL) { 5265 txq->axq_link = NULL; 5266 ATH_TXQ_UNLOCK(txq); 5267 break; 5268 } 5269 ATH_TXQ_REMOVE(txq, bf, bf_list); 5270 if (bf->bf_state.bfs_aggr) 5271 txq->axq_aggr_depth--; 5272 #ifdef ATH_DEBUG 5273 if (sc->sc_debug & ATH_DEBUG_RESET) { 5274 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5275 5276 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, 5277 ath_hal_txprocdesc(ah, bf->bf_lastds, 5278 &bf->bf_status.ds_txstat) == HAL_OK); 5279 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 5280 bf->bf_m->m_len, 0, -1); 5281 } 5282 #endif /* ATH_DEBUG */ 5283 /* 5284 * Since we're now doing magic in the completion 5285 * functions, we -must- call it for aggregation 5286 * destinations or BAW tracking will get upset. 5287 */ 5288 /* 5289 * Clear ATH_BUF_BUSY; the completion handler 5290 * will free the buffer. 5291 */ 5292 ATH_TXQ_UNLOCK(txq); 5293 bf->bf_flags &= ~ATH_BUF_BUSY; 5294 if (bf->bf_comp) 5295 bf->bf_comp(sc, bf, 1); 5296 else 5297 ath_tx_default_comp(sc, bf, 1); 5298 } 5299 5300 /* 5301 * Drain software queued frames which are on 5302 * active TIDs. 
5303 */ 5304 ath_tx_txq_drain(sc, txq); 5305 } 5306 5307 static void 5308 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5309 { 5310 struct ath_hal *ah = sc->sc_ah; 5311 5312 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5313 __func__, txq->axq_qnum, 5314 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 5315 txq->axq_link); 5316 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5317 } 5318 5319 static int 5320 ath_stoptxdma(struct ath_softc *sc) 5321 { 5322 struct ath_hal *ah = sc->sc_ah; 5323 int i; 5324 5325 /* XXX return value */ 5326 if (sc->sc_invalid) 5327 return 0; 5328 5329 if (!sc->sc_invalid) { 5330 /* don't touch the hardware if marked invalid */ 5331 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5332 __func__, sc->sc_bhalq, 5333 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 5334 NULL); 5335 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5336 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 5337 if (ATH_TXQ_SETUP(sc, i)) 5338 ath_tx_stopdma(sc, &sc->sc_txq[i]); 5339 } 5340 5341 return 1; 5342 } 5343 5344 /* 5345 * Drain the transmit queues and reclaim resources. 5346 */ 5347 static void 5348 ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 5349 { 5350 #ifdef ATH_DEBUG 5351 struct ath_hal *ah = sc->sc_ah; 5352 #endif 5353 struct ifnet *ifp = sc->sc_ifp; 5354 int i; 5355 5356 (void) ath_stoptxdma(sc); 5357 5358 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5359 /* 5360 * XXX TODO: should we just handle the completed TX frames 5361 * here, whether or not the reset is a full one or not? 5362 */ 5363 if (ATH_TXQ_SETUP(sc, i)) { 5364 if (reset_type == ATH_RESET_NOLOSS) 5365 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5366 else 5367 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5368 } 5369 } 5370 #ifdef ATH_DEBUG 5371 if (sc->sc_debug & ATH_DEBUG_RESET) { 5372 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 5373 if (bf != NULL && bf->bf_m != NULL) { 5374 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 5375 ath_hal_txprocdesc(ah, bf->bf_lastds, 5376 &bf->bf_status.ds_txstat) == HAL_OK); 5377 ieee80211_dump_pkt(ifp->if_l2com, 5378 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5379 0, -1); 5380 } 5381 } 5382 #endif /* ATH_DEBUG */ 5383 IF_LOCK(&ifp->if_snd); 5384 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 5385 IF_UNLOCK(&ifp->if_snd); 5386 sc->sc_wd_timer = 0; 5387 } 5388 5389 /* 5390 * Disable the receive h/w in preparation for a reset. 
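 * The shutdown order below matters: quiesce the PCU first, then clear the rx filter, then stop the DMA engine, optionally delaying long enough for an in-flight frame to land before the descriptor chain is torn down.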
5391 */ 5392 static void 5393 ath_stoprecv(struct ath_softc *sc, int dodelay) 5394 { 5395 #define PA2DESC(_sc, _pa) \ 5396 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 5397 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 5398 struct ath_hal *ah = sc->sc_ah; 5399 5400 ath_hal_stoppcurecv(ah); /* disable PCU */ 5401 ath_hal_setrxfilter(ah, 0); /* clear recv filter */ 5402 ath_hal_stopdmarecv(ah); /* disable DMA engine */ 5403 if (dodelay) 5404 DELAY(3000); /* 3ms is long enough for 1 frame */ 5405 #ifdef ATH_DEBUG 5406 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { 5407 struct ath_buf *bf; 5408 u_int ix; 5409 5410 device_printf(sc->sc_dev, 5411 "%s: rx queue %p, link %p\n", 5412 __func__, 5413 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), 5414 sc->sc_rxlink); 5415 ix = 0; 5416 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 5417 struct ath_desc *ds = bf->bf_desc; 5418 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; 5419 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, 5420 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 5421 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) 5422 ath_printrxbuf(sc, bf, ix, status == HAL_OK); 5423 ix++; 5424 } 5425 } 5426 #endif 5427 if (sc->sc_rxpending != NULL) { 5428 m_freem(sc->sc_rxpending); 5429 sc->sc_rxpending = NULL; 5430 } 5431 sc->sc_rxlink = NULL; /* just in case */ 5432 #undef PA2DESC 5433 } 5434 5435 /* 5436 * Enable the receive h/w following a reset. 5437 */ 5438 static int 5439 ath_startrecv(struct ath_softc *sc) 5440 { 5441 struct ath_hal *ah = sc->sc_ah; 5442 struct ath_buf *bf; 5443 5444 sc->sc_rxlink = NULL; 5445 sc->sc_rxpending = NULL; 5446 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 5447 int error = ath_rxbuf_init(sc, bf); 5448 if (error != 0) { 5449 DPRINTF(sc, ATH_DEBUG_RECV, 5450 "%s: ath_rxbuf_init failed %d\n", 5451 __func__, error); 5452 return error; 5453 } 5454 } 5455 5456 bf = TAILQ_FIRST(&sc->sc_rxbuf); 5457 ath_hal_putrxbuf(ah, bf->bf_daddr); 5458 ath_hal_rxena(ah); /* enable recv descriptors */ 5459 ath_mode_init(sc); /* set filters, etc. */ 5460 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 5461 return 0; 5462 } 5463 5464 /* 5465 * Update internal state after a channel change. 5466 */ 5467 static void 5468 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5469 { 5470 enum ieee80211_phymode mode; 5471 5472 /* 5473 * Change channels and update the h/w rate map 5474 * if we're switching; e.g. 11a to 11b/g. 5475 */ 5476 mode = ieee80211_chan2mode(chan); 5477 if (mode != sc->sc_curmode) 5478 ath_setcurmode(sc, mode); 5479 sc->sc_curchan = chan; 5480 } 5481 5482 /* 5483 * Set/change channels. If the channel is really being changed, 5484 * it's done by resetting the chip. To accomplish this we must 5485 * first clean up any pending DMA, then restart things, a la 5486 * ath_init. 5487 */ 5488 static int 5489 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5490 { 5491 struct ifnet *ifp = sc->sc_ifp; 5492 struct ieee80211com *ic = ifp->if_l2com; 5493 struct ath_hal *ah = sc->sc_ah; 5494 int ret = 0; 5495 5496 /* Treat this as an interface reset */ 5497 ATH_PCU_UNLOCK_ASSERT(sc); 5498 ATH_UNLOCK_ASSERT(sc); 5499 5500 /* (Try to) stop TX/RX from occurring */ 5501 taskqueue_block(sc->sc_tq); 5502 5503 ATH_PCU_LOCK(sc); 5504 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */ 5505 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */ 5506 if (ath_reset_grablock(sc, 1) == 0) { 5507 device_printf(sc->sc_dev, "%s: concurrent reset!
/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	enum ieee80211_phymode mode;

	/*
	 * Change channels and update the h/w rate map
	 * if we're switching; e.g. 11a to 11b/g.
	 */
	mode = ieee80211_chan2mode(chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	sc->sc_curchan = chan;
}

/*
 * Set/change channels.  If the channel is really being changed,
 * it's done by resetting the chip.  To accomplish this we must
 * first cleanup any pending DMA, then restart things, a la
 * ath_init.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int ret = 0;

	/* Treat this as an interface reset */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* (Try to) stop TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	ATH_PCU_LOCK(sc);
	ath_hal_intrset(ah, 0);		/* Stop new RX/TX completion */
	ath_txrx_stop_locked(sc);	/* Stop pending RX/TX completion */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
	    __func__, ieee80211_chan2ieee(ic, chan),
	    chan->ic_freq, chan->ic_flags);
	if (chan != sc->sc_curchan) {
		HAL_STATUS status;
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
#if 0
		ath_hal_intrset(ah, 0);		/* disable interrupts */
#endif
		ath_stoprecv(sc, 1);		/* turn off frame recv */
		/*
		 * First, handle completed TX/RX frames.
		 */
		ath_rx_proc(sc, 0);
		ath_draintxq(sc, ATH_RESET_NOLOSS);
		/*
		 * Next, flush the non-scheduled frames.
		 */
		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */

		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
			if_printf(ifp, "%s: unable to reset "
			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
			    __func__, ieee80211_chan2ieee(ic, chan),
			    chan->ic_freq, chan->ic_flags, status);
			ret = EIO;
			goto finish;
		}
		sc->sc_diversity = ath_hal_getdiversity(ah);

		/* Let DFS at it in case it's a DFS channel */
		ath_dfs_radar_enable(sc, chan);

		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
			ret = EIO;
			goto finish;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_chan_change(sc, chan);

		/*
		 * Reset clears the beacon timers; reset them
		 * here if needed.
		 */
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}

		/*
		 * Re-enable interrupts.
		 */
#if 0
		ath_hal_intrset(ah, sc->sc_imask);
#endif
	}

finish:
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	IF_UNLOCK(&ifp->if_snd);
	ath_txrx_start(sc);
	/* XXX ath_start? */

	return ret;
}
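/*
 * NB: the sequence above is the driver's standard reset
 * choreography: block the taskqueue, mask interrupts, stop pending
 * TX/RX completion, take the reset gate (ath_reset_grablock), do
 * the work, then drop sc_inreset_cnt, unmask interrupts and restart
 * TX/RX.
 */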
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath_calibrate(void *arg)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	HAL_BOOL longCal, isCalDone;
	HAL_BOOL aniCal, shortCal = AH_FALSE;
	int nextcal;

	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
		goto restart;
	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
	aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
	if (sc->sc_doresetcal)
		shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);

	DPRINTF(sc, ATH_DEBUG_CALIBRATE,
	    "%s: shortCal=%d; longCal=%d; aniCal=%d\n",
	    __func__, shortCal, longCal, aniCal);
	if (aniCal) {
		sc->sc_stats.ast_ani_cal++;
		sc->sc_lastani = ticks;
		ath_hal_ani_poll(ah, sc->sc_curchan);
	}

	if (longCal) {
		sc->sc_stats.ast_per_cal++;
		sc->sc_lastlongcal = ticks;
		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: rfgain change\n", __func__);
			sc->sc_stats.ast_per_rfgain++;
			sc->sc_resetcal = 0;
			sc->sc_doresetcal = AH_TRUE;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
			return;
		}
		/*
		 * If this long cal is after an idle period, then
		 * reset the data collection state so we start fresh.
		 */
		if (sc->sc_resetcal) {
			(void) ath_hal_calreset(ah, sc->sc_curchan);
			sc->sc_lastcalreset = ticks;
			sc->sc_lastshortcal = ticks;
			sc->sc_resetcal = 0;
			sc->sc_doresetcal = AH_TRUE;
		}
	}

	/* Only call if we're doing a short/long cal, not for ANI calibration */
	if (shortCal || longCal) {
		if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
			if (longCal) {
				/*
				 * Calibrate noise floor data again
				 * in case of change.
				 */
				ath_hal_process_noisefloor(ah);
			}
		} else {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: calibration of channel %u failed\n",
			    __func__, sc->sc_curchan->ic_freq);
			sc->sc_stats.ast_per_calfail++;
		}
		if (shortCal)
			sc->sc_lastshortcal = ticks;
	}
	if (!isCalDone) {
restart:
		/*
		 * Use a shorter interval to potentially collect multiple
		 * data samples required to complete calibration.  Once
		 * we're told the work is done we drop back to a longer
		 * interval between requests.  We're more aggressive doing
		 * work when operating as an AP to improve operation right
		 * after startup.
		 */
		sc->sc_lastshortcal = ticks;
		nextcal = ath_shortcalinterval*hz/1000;
		if (sc->sc_opmode != HAL_M_HOSTAP)
			nextcal *= 10;
		sc->sc_doresetcal = AH_TRUE;
	} else {
		/* nextcal should be the shortest time for next event */
		nextcal = ath_longcalinterval*hz;
		if (sc->sc_lastcalreset == 0)
			sc->sc_lastcalreset = sc->sc_lastlongcal;
		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
			sc->sc_resetcal = 1;	/* setup reset next trip */
		sc->sc_doresetcal = AH_FALSE;
	}
	/* ANI calibration may occur more often than short/long/resetcal */
	if (ath_anicalinterval > 0)
		nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);

	if (nextcal != 0) {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
		    __func__, nextcal, isCalDone ? "" : "!");
		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
	} else {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
		    __func__);
		/* NB: don't rearm timer */
	}
}
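/*
 * Example (assuming the usual tunable defaults: short cal 100ms,
 * long cal 30s, ANI 100ms, hz=1000): the uncapped interval is 100ms
 * while calibration is converging (x10 when not in hostap mode) and
 * 30s once isCalDone, but the final MIN() against the ANI interval
 * caps the callout period at 100ms whenever ANI is enabled.
 */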
static void
ath_scan_start(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* XXX calibration timer? */

	ATH_LOCK(sc);
	sc->sc_scanning = 1;
	sc->sc_syncbeacon = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
	    __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
}

static void
ath_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	ATH_LOCK(sc);
	sc->sc_scanning = 0;
	rfilt = ath_calcrxfilter(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	ath_hal_setrxfilter(ah, rfilt);
	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);

	ath_hal_process_noisefloor(ah);
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	    __func__, rfilt, ether_sprintf(sc->sc_curbssid),
	    sc->sc_curaid);
}

/*
 * For now, just do a channel change.
 *
 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
 * control state and resetting the hardware without dropping frames out
 * of the queue.
 *
 * The unfortunate trouble here is making absolutely sure that the
 * channel width change has propagated enough so the hardware
 * absolutely isn't handed bogus frames for its current operating
 * mode. (Eg, 40MHz frames in 20MHz mode.)  Since TX and RX can and
 * do occur in parallel, we need to make certain we've blocked
 * any further ongoing TX (and RX, which can cause raw TX)
 * before we do this.
 */
static void
ath_update_chw(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
	ath_set_channel(ic);
}

static void
ath_set_channel(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	(void) ath_chan_set(sc, ic->ic_curchan);
	/*
	 * If we are returning to our bss channel then mark state
	 * so the next recv'd beacon's tsf will be used to sync the
	 * beacon timers.  Note that since we only hear beacons in
	 * sta/ibss mode this has no effect in other operating modes.
	 */
	ATH_LOCK(sc);
	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
		sc->sc_syncbeacon = 1;
	ATH_UNLOCK(sc);
}

/*
 * Walk the vap list and check if there are any vaps in RUN state.
 */
static int
ath_isanyrunningvaps(struct ieee80211vap *this)
{
	struct ieee80211com *ic = this->iv_ic;
	struct ieee80211vap *vap;

	IEEE80211_LOCK_ASSERT(ic);

	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
			return 1;
	}
	return 0;
}
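/*
 * NB: ath_newstate() below uses this when a vap drops to INIT to
 * decide whether the whole device can be quiesced.  "Running" is
 * deliberately iv_state >= IEEE80211_S_RUN, which also counts vaps
 * in the CSA and SLEEP states.
 */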
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	int csa_run_transition = 0;
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC,	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN,	/* IEEE80211_S_CAC */
	    HAL_LED_RUN,	/* IEEE80211_S_RUN */
	    HAL_LED_RUN,	/* IEEE80211_S_CSA */
	    HAL_LED_RUN,	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/*
	 * net80211 _should_ have the comlock asserted at this point.
	 * There are some comments around the calls to vap->iv_newstate
	 * which indicate that it (newstate) may end up dropping the
	 * lock.  This and the subsequent lock assert check after newstate
	 * are an attempt to catch these and figure out how/why.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
		csa_run_transition = 1;

	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = ieee80211_ref_node(vap->iv_bss);
	rfilt = ath_calcrxfilter(sc);
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	/*
	 * See above: ensure av_newstate() doesn't drop the lock
	 * on us.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ieee80211_free_node(ni);
		ni = ieee80211_ref_node(vap->iv_bss);

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 */
			sc->sc_syncbeacon = 1;
			if (csa_run_transition)
				ath_beacon_config(sc, vap);
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	ieee80211_free_node(ni);
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}
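/*
 * NB: the pass-thru ("clear") key installed above carries no cipher
 * state; it exists so RX key-index lookups map back to the node and,
 * on parts that support it, so h/w compression can be used for
 * stations running without crypto.
 */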
/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're also called on a re-associate; the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	ath_rate_newassoc(sc, an, isnew);
	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);
}

static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}

static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';
" ecm" : ""); 6184 return 0; 6185 } 6186 6187 static int 6188 ath_rate_setup(struct ath_softc *sc, u_int mode) 6189 { 6190 struct ath_hal *ah = sc->sc_ah; 6191 const HAL_RATE_TABLE *rt; 6192 6193 switch (mode) { 6194 case IEEE80211_MODE_11A: 6195 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 6196 break; 6197 case IEEE80211_MODE_HALF: 6198 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6199 break; 6200 case IEEE80211_MODE_QUARTER: 6201 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6202 break; 6203 case IEEE80211_MODE_11B: 6204 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 6205 break; 6206 case IEEE80211_MODE_11G: 6207 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 6208 break; 6209 case IEEE80211_MODE_TURBO_A: 6210 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 6211 break; 6212 case IEEE80211_MODE_TURBO_G: 6213 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6214 break; 6215 case IEEE80211_MODE_STURBO_A: 6216 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6217 break; 6218 case IEEE80211_MODE_11NA: 6219 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6220 break; 6221 case IEEE80211_MODE_11NG: 6222 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6223 break; 6224 default: 6225 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6226 __func__, mode); 6227 return 0; 6228 } 6229 sc->sc_rates[mode] = rt; 6230 return (rt != NULL); 6231 } 6232 6233 static void 6234 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6235 { 6236 #define N(a) (sizeof(a)/sizeof(a[0])) 6237 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6238 static const struct { 6239 u_int rate; /* tx/rx 802.11 rate */ 6240 u_int16_t timeOn; /* LED on time (ms) */ 6241 u_int16_t timeOff; /* LED off time (ms) */ 6242 } blinkrates[] = { 6243 { 108, 40, 10 }, 6244 { 96, 44, 11 }, 6245 { 72, 50, 13 }, 6246 { 48, 57, 14 }, 6247 { 36, 67, 16 }, 6248 { 24, 80, 20 }, 6249 { 22, 100, 25 }, 6250 { 18, 133, 34 }, 6251 { 12, 160, 40 }, 6252 { 10, 200, 50 }, 6253 { 6, 240, 58 }, 6254 { 4, 267, 66 }, 6255 { 2, 400, 100 }, 6256 { 0, 500, 130 }, 6257 /* XXX half/quarter rates */ 6258 }; 6259 const HAL_RATE_TABLE *rt; 6260 int i, j; 6261 6262 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6263 rt = sc->sc_rates[mode]; 6264 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6265 for (i = 0; i < rt->rateCount; i++) { 6266 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6267 if (rt->info[i].phy != IEEE80211_T_HT) 6268 sc->sc_rixmap[ieeerate] = i; 6269 else 6270 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6271 } 6272 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6273 for (i = 0; i < N(sc->sc_hwmap); i++) { 6274 if (i >= rt->rateCount) { 6275 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6276 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6277 continue; 6278 } 6279 sc->sc_hwmap[i].ieeerate = 6280 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6281 if (rt->info[i].phy == IEEE80211_T_HT) 6282 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6283 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6284 if (rt->info[i].shortPreamble || 6285 rt->info[i].phy == IEEE80211_T_OFDM) 6286 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6287 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6288 for (j = 0; j < N(blinkrates)-1; j++) 6289 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6290 break; 6291 /* NB: this uses the last entry if the rate isn't found */ 6292 /* XXX beware of overlow */ 6293 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6294 
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}
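/*
 * NB: this callout runs once a second (hz ticks) and only counts
 * sc_wd_timer down; the counter is armed by the transmit path
 * elsewhere in the driver and cleared when the queues drain
 * (ath_draintxq above zeroes it), so a hang/timeout is only declared
 * when transmissions have been pending for several seconds without
 * progress.
 */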
#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
		ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

#ifdef IEEE80211_SUPPORT_TDMA
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_BEACON_TIMERS bt;

	bt.bt_intval = bintval | HAL_BEACON_ENA;
	bt.bt_nexttbtt = nexttbtt;
	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
	bt.bt_nextatim = nexttbtt+1;
	/* Enables TBTT, DBA, SWBA timers by default */
	bt.bt_flags = 0;
	ath_hal_beaconsettimers(ah, &bt);
}

/*
 * Calculate the beacon interval.  This is periodic in the
 * superframe for the bss.  We assume each station is configured
 * identically wrt transmit rate so the guard time we calculate
 * above will be the same on all stations.  Note we need to
 * factor in the xmit time because the hardware will schedule
 * a frame for transmit if the start of the frame is within
 * the burst time.  When we get hardware that properly kills
 * frames in the PCU we can reduce/eliminate the guard time.
 *
 * Roundup to 1024 is so we have 1 TU buffer in the guard time
 * to deal with the granularity of the nexttbtt timer.  11n MAC's
 * with 1us timer granularity should allow us to reduce/eliminate
 * this.
 */
static void
ath_tdma_bintvalsetup(struct ath_softc *sc,
	const struct ieee80211_tdma_state *tdma)
{
	/* copy from vap state (XXX check all vaps have same value?) */
	sc->sc_tdmaslotlen = tdma->tdma_slotlen;

	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
		tdma->tdma_slotcnt, 1024);
	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
	if (sc->sc_tdmabintval & 1)
		sc->sc_tdmabintval++;

	if (tdma->tdma_slot == 0) {
		/*
		 * Only slot 0 beacons; other slots respond.
		 */
		sc->sc_imask |= HAL_INT_SWBA;
		sc->sc_tdmaswba = 0;		/* beacon immediately */
	} else {
		/* XXX all vaps must be slot 0 or slot !0 */
		sc->sc_imask &= ~HAL_INT_SWBA;
	}
}
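/*
 * Worked example of the interval math above (hypothetical numbers):
 * slotlen 10000us, guard 1000us, slotcnt 2 gives (10000+1000)*2 =
 * 22000us, rounded up to 22528 (22*1024) and shifted down to 22 TU;
 * 22 is already even so no further adjustment is made.
 */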
/*
 * Max 802.11 overhead.  This assumes no 4-address frames and
 * the encapsulation done by ieee80211_encap (llc).  We also
 * include potential crypto overhead.
 */
#define	IEEE80211_MAXOVERHEAD \
	(sizeof(struct ieee80211_qosframe) \
	 + sizeof(struct llc) \
	 + IEEE80211_ADDR_LEN \
	 + IEEE80211_WEP_IVLEN \
	 + IEEE80211_WEP_KIDLEN \
	 + IEEE80211_WEP_CRCLEN \
	 + IEEE80211_WEP_MICLEN \
	 + IEEE80211_CRC_LEN)

/*
 * Setup initially for tdma operation.  Start the beacon
 * timers and enable SWBA if we are slot 0.  Otherwise
 * we wait for slot 0 to arrive so we can sync up before
 * starting to transmit.
 */
static void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_txparam *tp;
	const struct ieee80211_tdma_state *tdma = NULL;
	int rix;

	if (vap == NULL) {
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
		if (vap == NULL) {
			if_printf(ifp, "%s: no vaps?\n", __func__);
			return;
		}
	}
	/* XXX should take a locked ref to iv_bss */
	tp = vap->iv_bss->ni_txparms;
	/*
	 * Calculate the guard time for each slot.  This is the
	 * time to send a maximal-size frame according to the
	 * fixed/lowest transmit rate.  Note that the interface
	 * mtu does not include the 802.11 overhead so we must
	 * tack that on (ath_hal_computetxtime includes the
	 * preamble and plcp in its calculation).
	 */
	tdma = vap->iv_tdma;
	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rix = ath_tx_findrix(sc, tp->ucastrate);
	else
		rix = ath_tx_findrix(sc, tp->mcastrate);
	/* XXX short preamble assumed */
	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);

	ath_hal_intrset(ah, 0);

	ath_beaconq_config(sc);			/* setup h/w beacon q */
	if (sc->sc_setcca)
		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
	ath_tdma_settimers(sc, sc->sc_tdmabintval,
		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
	sc->sc_syncbeacon = 0;

	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;

	ath_hal_intrset(ah, sc->sc_imask);

	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
	    sc->sc_tdmadbaprep);
}
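/*
 * Example of the guard calculation (hypothetical numbers): with mtu
 * 1500, roughly 70 bytes of IEEE80211_MAXOVERHEAD and a fixed
 * 24 Mb/s OFDM rate, the guard works out to roughly 550us per slot
 * (ath_hal_computetxtime folds the preamble and PLCP into the
 * estimate).
 */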
/*
 * Update tdma operation.  Called from the 802.11 layer
 * when a beacon is received from the TDMA station operating
 * in the slot immediately preceding us in the bss.  Use
 * the rx timestamp for the beacon frame to update our
 * beacon timers so we follow their schedule.  Note that
 * by using the rx timestamp we implicitly include the
 * propagation delay in our schedule.
 */
static void
ath_tdma_update(struct ieee80211_node *ni,
	const struct ieee80211_tdma_param *tdma, int changed)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int64_t tsf, rstamp, nextslot, nexttbtt;
	u_int32_t txtime, nextslottu;
	int32_t tudelta, tsfdelta;
	const struct ath_rx_status *rs;
	int rix;

	sc->sc_stats.ast_tdma_update++;

	/*
	 * Check for and adopt configuration changes.
	 */
	if (changed != 0) {
		const struct ieee80211_tdma_state *ts = vap->iv_tdma;

		ath_tdma_bintvalsetup(sc, ts);
		if (changed & TDMA_UPDATE_SLOTLEN)
			ath_wme_update(ic);

		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: adopt slot %u slotcnt %u slotlen %u us "
		    "bintval %u TU\n", __func__,
		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
		    sc->sc_tdmabintval);

		/* XXX right? */
		ath_hal_intrset(ah, sc->sc_imask);
		/* NB: beacon timers programmed below */
	}

	/* extend rx timestamp to 64 bits */
	rs = sc->sc_lastrs;
	tsf = ath_hal_gettsf64(ah);
	rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
	/*
	 * The rx timestamp is set by the hardware on completing
	 * reception (at the point where the rx descriptor is DMA'd
	 * to the host).  To find the start of our next slot we
	 * must adjust this time by the time required to send
	 * the packet just received.
	 */
	rix = rt->rateCodeToIndex[rs->rs_rate];
	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
	    rt->info[rix].shortPreamble);
	/* NB: << 9 is to cvt to TU and /2 */
	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;

	/*
	 * Retrieve the hardware NextTBTT in usecs
	 * and calculate the difference between what the
	 * other station thinks and what we have programmed.  This
	 * lets us figure how to adjust our timers to match.  The
	 * adjustments are done by pulling the TSF forward and possibly
	 * rewriting the beacon timers.
	 */
	nexttbtt = ath_hal_getnexttbtt(ah);
	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);

	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));

	if (tsfdelta < 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
		tsfdelta = -tsfdelta % 1024;
		nextslottu++;
	} else if (tsfdelta > 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
		tsfdelta = 1024 - (tsfdelta % 1024);
		nextslottu++;
	} else {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
	}
	tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt);
	/*
	 * Copy sender's timestamp into tdma ie so they can
	 * calculate roundtrip time.  We submit a beacon frame
	 * below after any timer adjustment.  The frame goes out
	 * at the next TBTT so the sender can calculate the
	 * roundtrip by inspecting the tdma ie in our beacon frame.
	 *
	 * NB: This tstamp is subtly preserved when
	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
	 *     slot position changes) because ieee80211_add_tdma
	 *     skips over the data.
	 */
	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
		&ni->ni_tstamp.data, 8);
#if 0
	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
	    (unsigned long long) tsf, (unsigned long long) nextslot,
	    (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
#endif
	/*
	 * Adjust the beacon timers only when pulling them forward
	 * or when going back by less than the beacon interval.
	 * Negative jumps larger than the beacon interval seem to
	 * cause the timers to stop and generally cause instability.
	 * This basically filters out jumps due to missed beacons.
	 */
	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
		sc->sc_stats.ast_tdma_timers++;
	}
	if (tsfdelta > 0) {
		ath_hal_adjusttsf(ah, tsfdelta);
		sc->sc_stats.ast_tdma_tsf++;
	}
	ath_tdma_beacon_send(sc, vap);		/* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}
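/*
 * Example of the adjustment above: if our programmed nexttbtt lags
 * the sender's schedule by 300us (tsfdelta = -300), the TSF is
 * pulled forward 300us and nextslottu is bumped one TU; if we lead
 * by 300us (tsfdelta = +300), the TSF is instead pulled forward
 * 1024-300 = 724us.  Either way the TSF only ever moves forward.
 */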
/*
 * Transmit a beacon frame at SWBA.  Dynamic updates
 * to the frame contents are done as needed.
 */
static void
ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	int otherant;

	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: missed %u consecutive beacons\n",
		    __func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: resume beacon xmit after %u misses\n",
		    __func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennas
	 */
	if (!sc->sc_diversity) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	bf = ath_beacon_generate(sc, vap);
	if (bf != NULL) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: beacon queue %u did not stop?\n",
			    __func__, sc->sc_bhalq);
			/* NB: the HAL still stops DMA, so proceed */
		}
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;	/* XXX per-vap? */

		/*
		 * Record local TSF for our last send for use
		 * in arbitrating slot collisions.
		 */
		/* XXX should take a locked ref to iv_bss */
		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
	}
}
#endif /* IEEE80211_SUPPORT_TDMA */

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif